Diffstat (limited to 'ansible_collections/netapp_eseries')
-rw-r--r--  ansible_collections/netapp_eseries/santricity/.gitignore | 4
-rw-r--r--  ansible_collections/netapp_eseries/santricity/CHANGELOG.rst | 234
-rw-r--r--  ansible_collections/netapp_eseries/santricity/CONTRIBUTING.md | 37
-rw-r--r--  ansible_collections/netapp_eseries/santricity/COPYING | 674
-rw-r--r--  ansible_collections/netapp_eseries/santricity/FILES.json | 1867
-rw-r--r--  ansible_collections/netapp_eseries/santricity/Jenkinsfile.blackduck | 57
-rw-r--r--  ansible_collections/netapp_eseries/santricity/MANIFEST.json | 36
-rw-r--r--  ansible_collections/netapp_eseries/santricity/README.md | 1053
-rw-r--r--  ansible_collections/netapp_eseries/santricity/ansible.cfg | 7
-rw-r--r--  ansible_collections/netapp_eseries/santricity/changelogs/.plugin-cache.yaml | 297
-rw-r--r--  ansible_collections/netapp_eseries/santricity/changelogs/changelog.yaml | 271
-rw-r--r--  ansible_collections/netapp_eseries/santricity/changelogs/config.yaml | 32
-rw-r--r--  ansible_collections/netapp_eseries/santricity/eseries-ansible-collections-diagram.png | bin 0 -> 90503 bytes
-rw-r--r--  ansible_collections/netapp_eseries/santricity/meta/runtime.yml | 2
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py | 57
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py | 90
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py | 85
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py | 106
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py | 143
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py | 80
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py | 128
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py | 746
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py | 465
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py | 253
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py | 176
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py | 544
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py | 200
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py | 351
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py | 278
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py | 332
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py | 209
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py | 1185
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py | 604
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py | 506
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py | 490
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py | 279
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py | 257
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py | 423
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py | 246
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py | 391
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py | 247
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py | 656
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py | 305
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py | 150
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py | 149
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py | 586
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py | 539
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py | 1578
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py | 1057
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py | 248
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py | 945
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py | 286
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py | 268
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py | 244
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py | 267
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py | 314
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py | 286
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py | 283
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py | 215
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py | 530
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py | 488
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py | 442
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py | 159
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py | 544
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py | 307
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py | 407
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py | 297
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py | 401
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py | 291
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py | 723
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py | 376
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py | 257
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py | 289
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py | 310
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py | 941
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py | 286
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py | 868
-rw-r--r--  ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py | 431
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/.travis.yml | 29
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/README.md | 149
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/defaults/main.yml | 45
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/meta/main.yml | 13
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/build_info.yml | 38
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/discovery.yml | 64
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_embedded.yml | 150
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_proxy.yml | 107
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/validate_system_api_url.yml | 34
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/main.yml | 14
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy.yml | 49
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy_security.yml | 241
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/README.md | 443
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/defaults/main.yml | 367
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/meta/main.yml | 19
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/initiator.yml | 114
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface.yml | 27
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/ib_iser.yml | 29
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/iscsi.yml | 56
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_ib.yml | 29
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_roce.yml | 41
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/lun_mapping.yml | 178
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/main.yml | 52
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot.yml | 19
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/group.yml | 75
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/rollback.yml | 41
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/view.yml | 116
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_absent.yml | 27
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_present.yml | 28
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/volume.yml | 34
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/hostnqn.j2 | 1
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/initiatorname_iscsi.j2 | 11
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/.travis.yml | 29
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/README.md | 301
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/defaults/main.yml | 197
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/meta/main.yml | 13
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/firmware.yml | 83
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/interface.yml | 171
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/logging.yml | 95
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/main.yml | 27
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/security.yml | 213
-rw-r--r--  ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/system.yml | 26
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/integration_config.yml | 32
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts/tasks/main.yml | 117
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts_syslog/tasks/main.yml | 112
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_asup/tasks/main.yml | 287
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auditlog/tasks/main.yml | 220
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auth/tasks/main.yml | 170
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_client_certificate/tasks/main.yml | 55
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_discover/tasks/main.yml | 64
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_drive_firmware/tasks/main.yml | 185
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_facts/tasks/main.yml | 19
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_legacy_tests.yml | 128
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_tests.yml | 320
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/main.yml | 2
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_global/tasks/main.yml | 185
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_host/tasks/main.yml | 243
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_hostgroup/tasks/main.yml | 137
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ib_iser_interface/tasks/main.yml | 88
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_interface/tasks/main.yml | 115
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_target/tasks/main.yml | 81
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ldap/tasks/main.yml | 104
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_lun_mapping/tasks/main.yml | 318
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_mgmt_interface/tasks/main.yml | 383
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/ib.yml | 88
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/main.yml | 2
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/roce.yml | 105
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_drive_firmware_upload/tasks/main.yml | 65
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_firmware_upload/tasks/main.yml | 65
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_systems/tasks/main.yml | 160
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_storagepool/tasks/main.yml | 1038
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_syslog/tasks/main.yml | 127
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_volume/tasks/main.yml | 768
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts.py | 194
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts_syslog.py | 151
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_asup.py | 318
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auditlog.py | 205
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auth.py | 488
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_client_certificate.py | 373
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_discover.py | 168
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_drive_firmware.py | 212
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_facts.py | 470
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_firmware.py | 494
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_global.py | 494
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_host.py | 434
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_hostgroup.py | 140
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ib_iser_interface.py | 159
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_interface.py | 239
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_target.py | 188
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ldap.py | 371
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_lun_mapping.py | 196
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_mgmt_interface.py | 513
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_nvme_interface.py | 220
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_drive_firmware_upload.py | 137
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_firmware_upload.py | 136
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_systems.py | 497
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_storagepool.py | 715
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_syslog.py | 128
-rw-r--r--  ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_volume.py | 864
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/hubPreCheck.groovy | 8
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/hubScan.groovy | 13
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/hubScanDocker.groovy | 76
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/hubScanProject.groovy | 123
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/setupBlackduckBuildParameters.groovy | 16
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/setupBuildParameters.groovy | 3
-rw-r--r--  ansible_collections/netapp_eseries/santricity/vars/setupSynopsysDetect.groovy | 15
184 files changed, 48501 insertions, 0 deletions
diff --git a/ansible_collections/netapp_eseries/santricity/.gitignore b/ansible_collections/netapp_eseries/santricity/.gitignore
new file mode 100644
index 000000000..85d9054e5
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/.gitignore
@@ -0,0 +1,4 @@
+*.pyc
+.idea
+*.iml
+ansible.cfg
\ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/CHANGELOG.rst b/ansible_collections/netapp_eseries/santricity/CHANGELOG.rst
new file mode 100644
index 000000000..8b5e5f01d
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/CHANGELOG.rst
@@ -0,0 +1,234 @@
+===================================================
+NetApp E-Series SANtricity Collection Release Notes
+===================================================
+
+.. contents:: Topics
+
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- netapp_eseries.santricity.na_santricity_iscsi_interface - Add support for iSCSI HIC speed.
+- netapp_eseries.santricity.nar_santricity_host - Add support for iSCSI HIC speed.
+
+Bugfixes
+--------
+
+- netapp_eseries.santricity.na_santricity_mgmt_interface - Add the ability to configure DNS, NTP and SSH separately from management interfaces.
+- netapp_eseries.santricity.nar_santricity_host - Fix default MTU value for NVMe RoCE.
+- netapp_eseries.santricity.nar_santricity_management - Add tasks to set DNS, NTP and SSH globally, separately from management interfaces.
+
+v1.3.1
+======
+
+Minor Changes
+-------------
+
+- Require Ansible 2.10 or later.
+- na_santricity_volume - Add size_tolerance option to handle the difference in volume size with SANtricity System Manager.
+- nar_santricity_common - Utilize provided eseries management information to determine the network to search.
+
+Bugfixes
+--------
+
+- na_santricity_mgmt_interface - Fix default required_if state option for na_santricity_mgmt_interface.
+- netapp_eseries.santricity.nar_santricity_host - Fix default MTU value for NVMe RoCE.
+
+v1.3.0
+======
+
+Minor Changes
+-------------
+
+- na_santricity_global - Add controller_shelf_id argument to set controller shelf identifier.
+- na_santricity_volume - Add flag to control whether volume expansion operations are allowed.
+- na_santricity_volume - Add volume write cache mirroring option.
+- nar_santricity_host - Add volume write cache mirroring options.
+
+Bugfixes
+--------
+
+- santricity_host - Ensure a list of volumes is provided to prevent the netapp_eseries.santricity.santricity_host (lookup) "index is string not integer" exception.
+
+v1.2.13
+=======
+
+Bugfixes
+--------
+
+- Fix availability of client certificate change.
+
+v1.2.12
+=======
+
+Bugfixes
+--------
+
+- Fix host and host port names being changed to lower case.
+
+v1.2.11
+=======
+
+Bugfixes
+--------
+
+- Fix login banner message option bytes error in na_santricity_global.
+
+v1.2.10
+=======
+
+Minor Changes
+-------------
+
+- Add login banner message to na_santricity_global module and nar_santricity_management role.
+- Add usable drive option for the na_santricity_storagepool module and nar_santricity_host role, which can be used to choose specific drives for storage pools/volumes or to define a drive selection pattern.
+
+Bugfixes
+--------
+
+- Fix PEM certificate/key imports in the na_santricity_server_certificate module.
+- Fix na_santricity_mgmt_interface IPv4 and IPv6 form validation.
+
+v1.2.9
+======
+
+Minor Changes
+-------------
+
+- Add eseries_system_old_password variable to facilitate changing the storage system's admin password.
+- Add remove_unspecified_user_certificates variable to the client certificates module.
+
+Bugfixes
+--------
+
+- Fix missing proxy client and server certificate in management role.
+- Fix missing proxy validate_certs and change current proxy password variables.
+- Fix server certificate module not forwarding certificate imports to the embedded web services.
+
+v1.2.8
+======
+
+Bugfixes
+--------
+
+- Fix pkcs8 private key passphrase issue.
+- Fix storage system admin password change from web services proxy in na_santricity_auth module.
+
+v1.2.7
+======
+
+v1.2.6
+======
+
+Bugfixes
+--------
+
+- Fix Jinja issue with collecting certificate paths in the nar_santricity_management role.
+
+v1.2.5
+======
+
+Bugfixes
+--------
+
+- Add missing http(s) proxy username and password parameters to the na_santricity_asup module and nar_santricity_management role.
+- Add missing storage pool configuration parameter, criteria_drive_interface_type, to nar_santricity_host role.
+
+v1.2.4
+======
+
+v1.2.3
+======
+
+Minor Changes
+-------------
+
+- Added nvme4k as a drive type interface to the na_santricity_storagepool module.
+- Added options for critical and warning threshold setting in na_santricity_storagepool module and nar_santricity_host role.
+- Fix dynamic disk pool critical and warning threshold settings.
+
+Bugfixes
+--------
+
+- Fix drive firmware upgrade issue that prevented updating firmware when the drive was in use.
+
+v1.2.2
+======
+
+v1.2.1
+======
+
+Release Summary
+---------------
+
+Release 1.2.2 simply removes the resource-provisioned volumes feature from the collection.
+
+Minor Changes
+-------------
+
+- Add IPv6 and FQDN support for NTP
+- Add IPv6 support for DNS
+- Add criteria_drive_max_size option to na_santricity_storagepool and nar_santricity_host role.
+- Add resource-provisioned volumes option to globals and nar_santricity_management role.
+- Remove resource-provisioned volumes setting from the na_santricity_global module and nar_santricity_management role.
+
+v1.2.0
+======
+
+Release Summary
+---------------
+
+1.2.0 release of ``netapp_eseries.santricity`` collection on 2021-03-01.
+
+Minor Changes
+-------------
+
+- na_santricity_discover - Add support for discovering storage systems directly using the devmgr/v2/storage-systems/1/about endpoint since the old discovery method is being deprecated.
+- na_santricity_facts - Add storage system information to facilitate the ``netapp_eseries.host`` collection's various protocol configurations.
+- na_santricity_server_certificate - New module to configure the storage system's web server certificate.
+- na_santricity_snapshot - New module to configure NetApp E-Series Snapshot consistency groups with any number of base volumes.
+- na_santricity_volume - Add percentage size unit (pct), which allows creating volumes sized as a percentage of the total storage pool size.
+- nar_santricity_host - Add eseries_storage_pool_configuration list options criteria_volume_count, criteria_reserve_free_capacity_pct, and common_volume_host to facilitate creating volumes based on percentages of the storage pool or volume group.
+- nar_santricity_host - Add support for snapshot group creation.
+- nar_santricity_host - Improve host mapping information discovery.
+- nar_santricity_host - Improve storage system discovery related error messages.
+- nar_santricity_management - Add support for server certificate management.
+
+Bugfixes
+--------
+
+- nar_santricity_host - Fix README.md examples.
+
+v1.1.0
+======
+
+Release Summary
+---------------
+
+This release focused on providing volume details through netapp_volumes_by_initiators in the na_santricity_facts module, improving the nar_santricity_common role's storage system API information, and resolving issues.
+
+Minor Changes
+-------------
+
+- Add functionality to remove all inventory configuration in the nar_santricity_host role. Set configuration.eseries_remove_all_configuration=True to remove all storage pool/volume configuration, host, hostgroup, and lun mapping configuration.
+- Add host_types, host_port_protocols, host_port_information, hostside_io_interface_protocols to netapp_volumes_by_initiators in the na_santricity_facts module.
+- Add storage pool information to the volume_by_initiator facts.
+- Add storage system not found exception to the common role's build_info task.
+- Add volume_metadata option to na_santricity_volume module, add volume_metadata information to the netapp_volumes_by_initiators dictionary in na_santricity_facts module, and update the nar_santricity_host role with the option.
+- Improve nar_santricity_common storage system API determination; attempt to discover the storage system using the information provided in the inventory before searching the subnet.
+- Increased the storage system discovery connection timeouts to 30 seconds to prevent systems from not being discovered over slow connections.
+- Minimize the facts gathered for the host initiators.
+- Update ib iser determination to account for changes in firmware 11.60.2.
+- Use existing Web Services Proxy storage system identifier when one is already created and one is not provided in the inventory.
+- Utilize eseries_iscsi_iqn before searching host for iqn in nar_santricity_host role.
+
+Bugfixes
+--------
+
+- Fix check_port_type method for ib iser when ib is the port type.
+- Fix examples in the netapp_e_mgmt_interface module.
+- Fix issue with changing host port name.
+- Fix na_santricity_lun_mapping unmapping issue; previously mapped volumes failed to be unmapped.
diff --git a/ansible_collections/netapp_eseries/santricity/CONTRIBUTING.md b/ansible_collections/netapp_eseries/santricity/CONTRIBUTING.md
new file mode 100644
index 000000000..fb7f482c7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# Contributing
+
+Thank you for your interest in contributing to the E-Series SANtricity Collection! 🎉
+
+We appreciate that you want to take the time to contribute! Please follow these steps before submitting your PR.
+
+## Creating a Pull Request
+
+1. Please search [existing issues](https://github.com/netappeseries/santricity/issues) to determine if an issue already exists for what you intend to contribute.
+2. If the issue does not exist, [create a new one](https://github.com/netappeseries/santricity/issues/new) that explains the bug or feature request.
+ * Let us know in the issue that you plan on creating a pull request for it. This helps us to keep track of the pull request and make sure there isn't duplicate effort.
+3. Before creating a pull request, write up a brief proposal in the issue describing what your change would be and how it would work so that others can comment.
+ * It's better to wait for feedback from someone on NetApp's E-Series SANtricity Collection development team before writing code. We don't have an SLA for our feedback, but we will do our best to respond in a timely manner (at a minimum, to give you an idea of whether you're on the right track and whether you should proceed).
+4. Sign and submit [NetApp's Corporate Contributor License Agreement (CCLA)](https://netapp.tap.thinksmart.com/prod/Portal/ShowWorkFlow/AnonymousEmbed/3d2f3aa5-9161-4970-997d-e482b0b033fa).
+ * From the **Project Name** dropdown select `E-Series SANtricity Collection`.
+ * For the **Project Website** specify `https://github.com/netappeseries/santricity`
+5. If you've made it this far, have written the code that solves your issue, and addressed the review comments, then feel free to create your pull request.
+
+Important: **NetApp will NOT look at the PR or any of the code submitted in the PR if the CCLA is not on file with NetApp Legal.**
+
+## E-Series SANtricity Collection Team's Commitment
+
+While we truly appreciate your efforts on pull requests, we **cannot** commit to including your PR in the E-Series SANtricity Collection project. Here are a few reasons why:
+
+* There are many factors involved in integrating new code into this project, including things like:
+ * support for a wide variety of NetApp backends
+ * proper adherence to our existing and/or upcoming architecture
+ * sufficient functional and/or scenario tests across all backends
+ * etc.
+
+In other words, while your bug fix or feature may be perfect as a standalone patch, we have to ensure that the changes work in all use cases, configurations, backends and across our support matrix.
+
+* The E-Series SANtricity Collection team must plan our resources to integrate your code into our code base and CI platform, and depending on the complexity of your PR, we may or may not have the resources available to make it happen in a timely fashion. We'll do our best.
+
+* Sometimes a PR doesn't fit into our future plans or conflicts with other items on the roadmap. It's possible that a PR you submit doesn't align with our upcoming plans, thus we won't be able to use it. It's not personal.
+
+Thank you for considering contributing to the E-Series SANtricity Collection project!
\ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/COPYING b/ansible_collections/netapp_eseries/santricity/COPYING
new file mode 100644
index 000000000..f288702d2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/netapp_eseries/santricity/FILES.json b/ansible_collections/netapp_eseries/santricity/FILES.json
new file mode 100644
index 000000000..e9ebe194a
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/FILES.json
@@ -0,0 +1,1867 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7182177f9666dbf6e7e8d2c595c772e683e5726978a307ae1c1e00f3601df70f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/.plugin-cache.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecd87bed65376e65dda0b9f18528a790fa71ff829d5afc573318a182b2d694ff",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5582e5bffc0a5ff23578db8394eb3cbfc99a4cca62e2cf40325962c970399111",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/santricity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "611502a07e010d0ec1b835868adee7c5f78363e2017711d82e4301f697b77a43",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cd836658ea68148bdb845d8c2f9f1fb081c0fbb93fd2278f1f8b932096b5b2a",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/santricity_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "218b254acba3f00e892b9bff1781dfe214f2ba4559c6dbaff6735c4ad85dcf97",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/santricity_host_detail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af4d8824a03ef0e4ad761d0f98d80d53cd54666c28a370576111803064c97943",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/santricity_lun_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "551c3d85494032b27b6e4d5046d210d7829121fe8f84c40498902f0c2cee8198",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/santricity_storage_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "885633fd11b5772a6ae1aad9ef206da68eecf65ee5f1411341572060cbb3f3e3",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/santricity_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1776d6aa9976abc7153ab8c1ed07746377a47fb427d4d5e24ea5f77fd2255fbe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_alerts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "399ee2a7c09d85954605905124f1772ab814b2648a2f2f9f73fc8a5c5f09e61e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_alerts_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b189749b72b9f030916e61f9521d0eaea73660e2802a889cbb4f46d6904fdd15",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_asup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0484c55a975c39e10142ff9456674124eb48631dcc26d7d971838d10dddea10",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_auditlog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab6c86b0c9105cfcf5743c3bf9ae67cca08b2b7c0d882e19ea11268282da1c0d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6c6a8d64d54b0c24c668f89e7fea21381e101b6abfe98f31175376df928d52f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_client_certificate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7073c2929a9c8acb497773c5342a40818ee78c39531bedf7a2162ec86e4b6b5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_discover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27f7da8a3870abec8063ccce6f7dfc7f915b89219eee18fcc732c863caf44867",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_drive_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26e41b7eed2547073319e19ca2c36bf018364ba8b1d1031259a4da33226fd7f5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a4c62f7b8e5edc498677381b1d2b32750aad934fc0c6f5914d7fdde3a0c1adb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_hostgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d044e70f0a5fb572e74f0304c9742cf21a4dde2a253b5216f7173d2bc2261a17",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_ib_iser_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd3e18496bef9e385423b73bb1f4cc43c7e811da85948f6975b50468fad38f79",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_iscsi_target.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6289cc37e292daac6d32831812fb0e4115c5fc9d6ccf554308cc3a4103470500",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9857644c847cb7eeaaf5ffbae4777db38c031a15625414a54cabfdb277da7276",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_lun_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd0716ace016a9f6df416b312b2a0ea956c9e9d678ee439a1cc7f503a54ece4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_nvme_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e6e39b99e192d75b285a716763d57079bf76b6ca52b7d8f180a61bf30eccbce",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_proxy_drive_firmware_upload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f3b35caf8eec64d27d07f5a7d33d07eb84b2258ca69575f6f752656881bc826",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_proxy_firmware_upload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38f9dda021a89f8d33babf643c20f7b20954d5fa54522e18369b638777f1c221",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_proxy_systems.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f89e77fdbd352d8c14e5dca4f6e6ad42d599d05e85c19cd4d890a09864011759",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_server_certificate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1cc849011618d879f05483f64f5d35547ac970b50171f0c17c75665b3f19e55",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97f1d5712a37e872ef0ab29d23e9a0268361191abb645e963827d751b459cdbf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_storagepool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2f2120d3124515aa701f5b7859c5124e20fab052d52b06a5a4806c968ff9cd5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7762155ce54a5d7b8b2cbda177cb16052cc829459ae76aa6673593f4dfa82105",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_alerts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d4ce3c217f6ba9e6666a0e66560747e4a3ed9e4e613deeee5a4a51f40b73c7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_amg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb66d4e13f9b36a7cbeee5b858f6968177b18dcef6d1d75e8e0f3388987520b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_amg_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c43a87ad2d8c63201e8025e2a144845946a8855cfd0374838911308d3652d7cf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_amg_sync.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a88c7ad0156ae75069b1a453f7d60e741c1de35500c888f142811eb393c9cb3a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_asup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b50f232c688c6ac698a4ed681256e6f5590a88f8071b66906df61da76a7e002",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_auditlog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "036a74f5d39604cb04b4eb48665485e9c2419f1e62889a34bdf6be7f83deef7d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13987a41072dde6d6f187ede14c87904defab48c3fd7fbeda2790e53cdfe0594",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_drive_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0ce6a9d433adffe20a4a777ae6e473da95072e3da723b1426383deada085bb5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d5fc8d9dd8230df962c972bff06d261c8d6cf1cedd2eb3b8fe9fe881db99f9d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4dc8c105fc695981b1c3a85bd47ef9dc310ffd2eee9ac4e8d10bcbee2b30f9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_flashcache.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c16c7dcfdb0324510d29032a08f4b67509893ae717cf93affed3b183f826f8f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07a977d571ff46e74fb81017559ad8f6c4728da9f849703c90b11b2ef1a7bb6e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16b8db9684ceff4dd15f71eb88c0898341eb3fe3ef4d030d88331c0438cced1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_hostgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ca6f342ca6c0fb5e446c309aa7d4a677f59a4a2b8b21906bb8886d62dadc319",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_iscsi_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ad785af26b09545e187f4cd672ee896cc94bca8a0a73d591d470a52dd5dba5c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_iscsi_target.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40f6fa9a6e465df49009997b1b32aa56b363f43c2b79f3d039ccd83deb23f9de",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1d1ef7b578717e500990221238834ab0402e82eb46cbf6166c5603e436388ce",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_lun_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12ad1e184c0a38ec4429d72d0c8173c114b399a5822d04d082c14980d00f092e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_mgmt_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbf5a90bd262ea363bc70349b6c490fd9221d7d6a2c323068e9f7941f0dc2f52",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_snapshot_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6d47bfac5cf52cbc652f4bcb68179c9bed9f30363027fe35c5c28ff4d11a40c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_snapshot_images.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73874df191a3698c1e2ecdd3dda0256c3416a0bc274dd989b156d049b93154cb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_snapshot_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "639c77978ca3b08a5d7732c61cce2676ba047e896ecf7bca727c2e97849e622e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_storage_system.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b21a4263209e8e03b23268bd0513464d3a7c3df403f48b1ab063329498ecd479",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_storagepool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6151a66e8654d2bef10ef1bfa06b34c95cb9bafadd74d3d68d85882fa02b2b2d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bb0832e76055aa1eae8afb4936d3c95cef2f72f8a3d2724f512de89eac22817",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_volume_copy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ca543b8dfcc630acab9aaa130a833f81ed00f5a9868dca7e535873363f157a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netapp_e_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee4a477da7ad80aece4f72332419baf3cb0b26c9e1007b62a32b44d36bf582d5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b68ce256cc54135ea2305b4d980eac70ca55a07197599f0abfb4da3ab91bf65d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34a62e9feec3ca0c12db7d639d6010619d4b8aa29edec099db3107e56153bcbe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32b4a2312375e3122e308fa112dd68e111778d37748e29c4cd6221a925dafa48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa9cd58c66b6c12cf5288baa87e8943ab664ba618e56ab822216a58f524371de",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_iscsi_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "781290f82341a0518d116d12f91b26fe0b408b74405ce42a3a3ef6096ed7c2b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_santricity_mgmt_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f6a784dd79f643f2c78c634d933ca9d2c935ddb3e80ed7cf202660d38c0c86a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee54c8258cf42c09a7e88db777cf8bb0242280ffcb0b2e06c0c1b27942644246",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/santricity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d0ed9e58e40c75b5fd66265116b73ec02802d1737dae5d22b5eea01fc40ccd9",
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/.travis.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43e63a93716f15cdb102c201e4631be604b45910b8c749c78d89e3fc076c7c7e",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "799d6a2bac64c86b9ba7ff3a06021453a92411a452b81fb040771895e91b9037",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aad95c050af5dfb0a69f30b06cf37adbefacd21c4f76951d5c6b6ad877823233",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/build_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f88f4ddfbbb10618b60685532e2d7f6cdae42404a2c19fc5615a2e999b99d26",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/collect_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/collect_facts/validate_system_api_url.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf4da03e9df313b537fd5d223c8f84d108fa5c3d4c33af6aea1f35e039b5eadb",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/collect_facts/prefer_embedded.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07edc297d303fb5a77a3f0105c9a7017ecf70a4cf8a14aa34991c1485ea30af9",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/collect_facts/prefer_proxy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff675445d271a61f0f1d2ebbba90503c729ba35cc67ad028153e002e81efee8d",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/collect_facts/discovery.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5541fa4fdeb93b4ca6c6aa2345eb3004021b26c9e6682899a3ba69fdd51403e7",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/proxy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33760d17ff09f6af8cf80ad028687ca6db7f1082c13cf19b48a9cdff94eb606a",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/proxy_security.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fe8bf482a01af5d5cd9ee6ff1a8fe6f0a28b6d1ef91241874d3c03b0185b040",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2dd3e39d5c8ea70e739b4b9342cd7693f4241daf2cd08fc26c17e80318f7393",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_common/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "931e84bef0c09299b87a808e5ab612c9933b1adc451a6a1ed2fcb0ffd2eb2e39",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b20abf6125e49f33f1bbe04bdd54e8be0aa203808f1e2404ce1dc6b083af2a3",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ae98780f1384338d258c6fba548ed8952f3ce57b87c717a991ea917452ba113",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/initiator.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddaa5eabe27a77500e5e23beb42855e5f4c49e0cb357af335f8fc43ed6e9f13a",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/interface",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/interface/ib_iser.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e84cfa03b99d3634f1da3eaef694d1b19bfde995fd76e803242f1117e8e43c71",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/interface/nvme_ib.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c2dc845874503fdbbb976a9a14275f9ca72feceaae983d5be754bf8fc268f58",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/interface/nvme_roce.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95c9683ffd5dcfd85d4061d4e7b782bfe446c837c4f7299a2ee0648203059f55",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/interface/iscsi.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c358dd35973cf218d5f7244774f522d4ef0066f548c57caac08512ae217b546",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/snapshot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/snapshot/group.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b131aa20ee6e527810640f7795a28d116412d947428c225da07cf06f6cb9a698",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/snapshot/rollback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d647b10ba795d5f9dbaec6edaddee3219bbec86dfae43ed8f5531059345381dc",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/snapshot/view.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf7b03986245e71bca63ddb953ac6558ada6ea1fea41f298a6eac53507a12db4",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/storage_pool_absent.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a126daac268275ac020ef123ceee5d5713e2dfa311b3d7c23ec461c42b13407",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/storage_pool_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e337fe0aad4c1e77786ee724d00723d2ece20596444f57dd1361872a29b37c55",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/volume.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a620ce2a74fe6b40f5c914584e7f54ae7afe9522e7068d013cf04392b0e242da",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/interface.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db6956f6983d36ca3a94cf454de5c0e27ca9f8f65f9a9e46a1d12506ab6bf0a8",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfbb01ed126696022c17cec8c85860283f1fa52d14d099f78e6f9b4eac87502a",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/snapshot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72e5de1f246435c98925d79c5b4d037139c8b4f560ac9d89c8542423875056e3",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/tasks/lun_mapping.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2e5ebb38a812fe888935c84ebfc6013aa7f2c0e5aa30f3c68b1d8cb3be7a07a",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/templates/hostnqn.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5957b6cb8ea9c4eccfcaa97b09169e456144e7d7f4d379266a60fda1e081f2f3",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/templates/initiatorname_iscsi.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5d0f276372ed34241309338f52e1c9c9b8ca2f6ae070804603f8ee42288c851",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_host/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c69ab063fce4f0491dea655b3ae37524c4ee7bc70349615c60fa858e74ac9e51",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/.travis.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43e63a93716f15cdb102c201e4631be604b45910b8c749c78d89e3fc076c7c7e",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f90b71b40d505ecfc8751dc9729b9b22b83a7777bc3eca4e6fb94b416857e78c",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34f64f135fdc5fae0fd63e1542b1b0be22e287cccc1a5e4b97c9d74752f241b2",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks/firmware.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bd99200517048e400ac97c16ef9aeadd35103df351d9237ad0dd8651ffd9029",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks/logging.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62a7cc1504af3365f694b7bbe6963d432d2ca09d9dcf8dff051d7683f62b2171",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks/security.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d23019241dca6bb023a8c80b4a28ee35a5eefca1c7798a33b6f41263bbfbb136",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks/system.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbdcc666d2b7a58edc6106d40d68092c2db54876c92aec1703cc9ec5adf938f3",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks/interface.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e26c94bb602eef5c9328228e369b24eebfc3b0cddda38632d87d55e88b0f05f",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0ab1bd7043cfe87afb9ac0b0f2f71b74d8e9629094aabdf2685b6dca814b770",
+ "format": 1
+ },
+ {
+ "name": "roles/nar_santricity_management/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7ea932d96049a7149fbb50c9405af159f7679d9f3949b62ca77246f2ce50d04",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/integration_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b541ccd104dd15f35a8ca62befd4a44fc202c2b73b668367e85887315fce9d9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_alerts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_alerts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_alerts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4daa4382bae8129ae1b46aa2676864c81c1d30f7a9bf2b4bd9b97c0d560b4b1b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_alerts_syslog",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_alerts_syslog/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_alerts_syslog/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e7f2f74372dd47e02890b6992b8d1f4b3ee141941c1118c45eac7ed04e6933",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_asup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_asup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_asup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f99c211c88b05c103c7f066a3fcca5d1eeafe15d4c6217562e7a2bcc5649f77",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_auditlog",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_auditlog/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_auditlog/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62d89c9146f8060a0b6028b88a2ad55ca3598f0757733964f6ef6f77e5761a47",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_auth",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_auth/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_auth/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "361e5c8d61f22ee7388fd0a4672f770f4af4dce09bfb85c2008dc0b5d4ef05c1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_client_certificate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_client_certificate/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_client_certificate/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57044ad5d4cb4a6abfb10f57fdc3fd67a9945e3487f4797ff2720e37522fa09a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_discover",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_discover/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_discover/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2f4f4e50f6a6e8b82faca06a24d3a84fae705951a62508053b21554c11f26d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_drive_firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_drive_firmware/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_drive_firmware/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b53979118cf728240e2712524191638bfda8e38f7da9e2d623d4e8c0fe6bbf47",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3877ef532f0f4c143815989ff7420414949c31c0aeda6a55a19c8c4749da1505",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_firmware/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_firmware/tasks/firmware_legacy_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "386b07dcf7bcc6e8c5fb6c6cc507d601f64f026bd4c6f89ea5bc9e18d65cbe37",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_firmware/tasks/firmware_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ee8117ea3a5edda386eed46132760603aebe728fb44ab587c314a15ba26c4f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_firmware/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51412bc402190a1e357ddac089b000508d3c656c164eed77f1ba80e3a4076bda",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_global",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_global/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_global/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "faf59aa803396eecb56306e273a68db4f43acb45b6e363408ce55de53598d750",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_host",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_host/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_host/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d9e7ab4a64da2dffafcccd8d5c01953e61ea4bc0dca91c9697dc3d5387f7d71",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_hostgroup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_hostgroup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_hostgroup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a067b4a6c973dd830116420ecade22b3b18a8d687dbcae3a29fe90778f6a188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_ib_iser_interface",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_ib_iser_interface/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_ib_iser_interface/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "916951a38b85304c670cb622ee88450903aaf75406a2bb91799f2bbb81528fc0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_iscsi_interface",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_iscsi_interface/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_iscsi_interface/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4099778174fac96f2a110faf9fc78c224f4c934de9c29641f1f9c90b7b9b1ed9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_iscsi_target",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_iscsi_target/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_iscsi_target/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe211b818a701aa7df286165e7509ee5ccddd3b6c160bd606492d4a990918fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_ldap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_ldap/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_ldap/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d0f299652f0253593b245ca409d72008d832c218f4a70fdb5a43709209acc32",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_lun_mapping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_lun_mapping/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_lun_mapping/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25fb026948ec8345d9ac774346d37b00aef88250b57a9d3d2248bc6952f843a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_mgmt_interface",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_mgmt_interface/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_mgmt_interface/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6d4879a8d0e422ab24b8963c98c68ad9b3b8d89d950bc11c4cf9aebfdd32b89",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_nvme_interface",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_nvme_interface/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_nvme_interface/tasks/ib.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61a29d8d1156bc1eb6569eaef22560df51bfa01c963290a0d9590edc6937191e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_nvme_interface/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32c65df83cec2f34228c0d2098df2f667f60ce14d438b3587de5f7f048abc10f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_nvme_interface/tasks/roce.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "741543b96775ed57e939f053d8651429a3d29fccb8bd05c1c7791a4338c58cc6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_drive_firmware_upload",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_drive_firmware_upload/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_drive_firmware_upload/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f12de74d007683eb3f6b4f3309709d328b9b48eec45692c3d09a2d8ef497ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_firmware_upload",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_firmware_upload/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_firmware_upload/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82681851f1dad9d2cdc816a80ea2315785152ed6d46e54cfe5d5927160118279",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_systems",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_systems/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_proxy_systems/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae4770030714addb929c2facc94161dea7b4a74dd702b38a0a1373e42588da9d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_storagepool",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_storagepool/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_storagepool/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb6262188f4495ed0475276ba256d76fb59b23d3df8ff87be742207ac64f0478",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_syslog",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_syslog/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_syslog/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56973d5b71207809d2231bee3a57a5ba357b7fa87883a298fbc8cec20ceeaeab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/na_santricity_volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883075039717541225bdd12fc4d5244c2c33356f1673e1eb62bba8ad6e612539",
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_alerts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7a2ed86b5246879519082addbb67b21ea16888139b0d90015e1a5f7a7f78882",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_alerts_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "579d272823f833dd0eb8b0c9fa20d67ed4845b867323a5792a3f8009a9f28087",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_asup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a952a3ecceac3b820d4508caa42049ed48235ec84fa63f51428c824b3ba42292",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_auditlog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4058ede1bab1d1387be48b87c3a1b0252f6f5c7bc41150bdb523e0814692fbf7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2a9dcda0f50e5e2ba027c90c0cdf7d5742d9789d6d222cf262d51afae7b704c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_client_certificate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a85f8089f8dd5bff52e86387740e1b3e83f1e06afafee94487f15eef75e8697",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_discover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96e6da7639799fb31f8b13d75831735d4cf2ba0905cc8c0613c3c58a864f1c11",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_drive_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67defa8018bff4056c4c5624e97574741c76a2dde57d4be4cea0343576f5468f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d40f697e305ca63014f8d69bd1aa914f9146d7791be81693b9e239fff61d4b1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8748be343eaaea095feae0de2800fffb6ffdfe7f73ba9ea18cb21fded98a265",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7966de3e35426ecc5b1875aefe37b03d78f86839df51eb603e16a070810a30a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fc025ae166e4f75c8cda22914b12e89c14ae3af15c6e414aaebc31b751a56ee",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_hostgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "482ba9550dfe4c1021ca7ea1777a87b0bc40bed7608be3a3ef44677a1a5bb2da",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_ib_iser_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1a1a00e49f08222e1188020acaa6486ee10594bc4fc6cc6959daceac159ea7a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_iscsi_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9808f5cb427e0ac02932e1d026c863847023eb88e854c53a5cb774e57c2a26fa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_iscsi_target.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5512b6015214a44fba880d41fe6118c8ea32443e0601a979a406b66bd71505b4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c145ca85c3a79494c64d21e422f0d0c294a4eaa8b16b2b98ea1f7054556a37f9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_lun_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "681ad2861a1e7083dcffeeae9b5e7631ecdc925bb6353c77e1002fd018da252d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_mgmt_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "529296bf01960cc1c18eb41eb53d5e1900aa8cba5f046d22c600029b8c90392e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_nvme_interface.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91329b895c2a014398db31e06b3adf5974a2170cd9c21f5a5d4644f74171c037",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_proxy_drive_firmware_upload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19fd540ade11b616ac3e8b2f1d7126bbeb4efc1062174300b5dd900ec18f68b4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_proxy_firmware_upload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a501a034d03112ac421515a9898cee92c94fc424523f495317ef1f5930a7eac8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_proxy_systems.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98b029bbba2aeab8a7f589b01fe18d2875c56b7f25b5061abf627572f6bf68c6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_storagepool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68752e3cbbe6490630cec79684dc9868e1474daeed6dba6e0fc61a6547381b58",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_syslog.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64f46c3b1ed4401f6d8ca6f90bbd5a6911c1a0ec7fe5405d8fa4797efcc944d6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/test_na_santricity_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16bbde2e9503cb45324f258cd0ec97d939f4473dde118b45689e5b794ecef680",
+ "format": 1
+ },
+ {
+ "name": "vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "vars/hubPreCheck.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a0f2fd90b2cc895e4c16fc58066441d43ae0fb9156a9d86ebe19175092ac722",
+ "format": 1
+ },
+ {
+ "name": "vars/hubScan.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17ff631dacfa7affc325cf3a8dee4d0251ea83e950dca43c34e30fbf0a0fba68",
+ "format": 1
+ },
+ {
+ "name": "vars/hubScanDocker.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06344da5db89a6ee4abad8d4ab87e4649a4c4116739006124977e9f1c90453df",
+ "format": 1
+ },
+ {
+ "name": "vars/hubScanProject.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e5d3dc4ad8fbe5202c7353e18940f28648c4af9727f5e9a245a0169691581be",
+ "format": 1
+ },
+ {
+ "name": "vars/setupBlackduckBuildParameters.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b0a8bbf6c402e999f533b8317c536ab1a786055f88d12493dc1cb205c243f9c",
+ "format": 1
+ },
+ {
+ "name": "vars/setupBuildParameters.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e104536de237b7f1d36638b956a6dfb5c9e813cd0760856574416b4dd4b99b5",
+ "format": 1
+ },
+ {
+ "name": "vars/setupSynopsysDetect.groovy",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63170d1242b258731ab477dbdb172a6bf5c454c33398d81bc9f65a6d0d16cef5",
+ "format": 1
+ },
+ {
+ "name": "CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "077a75c259dc0eb7fc9a14411bfbc30953e63467d7febb6326c77a3690359910",
+ "format": 1
+ },
+ {
+ "name": "Jenkinsfile.blackduck",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b4dbc641fc7491c801d0c0e403ecc85c40e322568f13bb53cd3502a98c9f08d",
+ "format": 1
+ },
+ {
+ "name": "ansible.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef48fa62ee0052bafcf9086d7bb336efee24347f70c091dcabeafa70eab8e8e4",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a89a5f70a89fce50dbc40c50b220fafa7416cc282a017fc35295b67cfc814178",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7647e2bcadbce71140df8bb7bb14d56ea18ce2d91e350ed532f719f2474a7d5c",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "689027f80bc35af3cff0dd827f43d2bf8ccc6723f75ea9941f8263bb0b18df47",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00055619f79649cd355c38d06e2cf0216e4a5089f13cb4593b73c645e2e1ed4e",
+ "format": 1
+ },
+ {
+ "name": "eseries-ansible-collections-diagram.png",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b816db08831f19fb8416ad366ad908f5865d3db1ed732c0385f342868e7a4c17",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/Jenkinsfile.blackduck b/ansible_collections/netapp_eseries/santricity/Jenkinsfile.blackduck
new file mode 100644
index 000000000..3e4fb6f01
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/Jenkinsfile.blackduck
@@ -0,0 +1,57 @@
+// Copyright 2022 NetApp, Inc. All Rights Reserved.
+// Licensed under the BSD-3-Clause.
+
+// Set up build parameters so any branch can be manually rebuilt with different values.
+properties([
+ parameters([
+ string(name: 'hubProjectVersion', defaultValue: '', description: 'Set this to force a BlackDuck scan and ' +
+ 'manually tag it to a particular BlackDuck version (e.g. 1.0.1).')
+ ])
+])
+
+hubProjectName = 'esg-ansible-santricity-collection'
+hubProjectVersion = 'master'
+if (params.hubProjectVersion != '') {
+ // Tag the manually selected version if the hubProjectVersion build parameter is set.
+ hubProjectVersion = params.hubProjectVersion
+}
+
+pipeline {
+ agent any
+
+ options {
+ timestamps()
+ timeout(time: 3, unit: 'HOURS')
+ buildDiscarder(logRotator(artifactNumToKeepStr: '15'))
+ }
+
+ stages {
+ stage("BlackDuck Scan") {
+ options {
+ timeout(time: 60, unit: 'MINUTES')
+ }
+
+ steps {
+                echo "Performing BlackDuck scanning..."
+ synopsys_detect detectProperties: """
+ --detect.project.name=${hubProjectName} \
+ --detect.project.version.name=${hubProjectVersion} \
+ --detect.cleanup=false \
+ --detect.project.code.location.unmap=true \
+ --detect.detector.search.depth=50 \
+ --detect.code.location.name=${hubProjectName}_${hubProjectVersion}_code \
+ --detect.bom.aggregate.name=${hubProjectName}_${hubProjectVersion}_bom \
+ --detect.excluded.directories=blackduck/ \
+ --detect.output.path=blackduck
+ """
+ }
+ post {
+ success {
+ archiveArtifacts(artifacts: 'blackduck/runs/**')
+ }
+ }
+ }
+ }
+}
diff --git a/ansible_collections/netapp_eseries/santricity/MANIFEST.json b/ansible_collections/netapp_eseries/santricity/MANIFEST.json
new file mode 100644
index 000000000..7b06f7ee2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/MANIFEST.json
@@ -0,0 +1,36 @@
+{
+ "collection_info": {
+ "namespace": "netapp_eseries",
+ "name": "santricity",
+ "version": "1.4.0",
+ "authors": [
+ "Joe McCormick (@iamjoemccormick)",
+ "Nathan Swartz (@ndswartz)"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "netapp",
+ "eseries",
+ "santricity"
+ ],
+ "description": "Latest content available for NetApp E-Series Ansible automation.",
+ "license": [
+ "GPL-3.0-only",
+ "BSD-3-Clause"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://www.github.com/netapp-eseries/santricity",
+ "documentation": "https://www.netapp.com/us/media/tr-4574.pdf",
+ "homepage": "https://www.github.com/netapp-eseries/santricity",
+ "issues": "https://github.com/netappeseries/santricity/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ca82e60ff032c6438a4b21aeb0d9cda1b9591adedba932cdc73be72361184c9",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/README.md b/ansible_collections/netapp_eseries/santricity/README.md
new file mode 100644
index 000000000..1bdc05e66
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/README.md
@@ -0,0 +1,1053 @@
+NetApp E-Series SANtricity Collection
+=========
+ The SANtricity collection consists of the latest available versions of the NetApp E-Series SANtricity modules and roles.
+
+ This collection provides NetApp E-Series customers with a wide range of configuration options through the collection's modules. However, the real
+ benefit of using the SANtricity collection is found in the host and management roles. These roles provide ready-made, policy-based orchestration for
+ E-Series platforms based on predefined role variables.
+ Once the physical hardware has been installed, the SANtricity roles are capable of discovering the DHCP-assigned addresses, setting initial passwords
+ and management interfaces, so your automation can do full deployments for you without logging directly into the devices. And that's just the beginning:
+ the management role will also ensure alerts, ASUP, logging, LDAP, and firmware are configured as expected, and the host role will set up host interfaces,
+ provision and map storage, and, if your servers are defined in the inventory, correctly populate E-Series hosts and host groups automatically.
+
+
+ Roles:
+    - nar_santricity_common: Discover NetApp E-Series storage systems and configure the SANtricity Web Services Proxy.
+ - nar_santricity_host: Configure storage pools, volumes, hosts, host groups, port interfaces and snapshots.
+ - nar_santricity_management: Manage storage system's name, management interfaces, alerts, syslog, auditlog, asup, ldap, certificates, drive firmware and controller firmware.
+
+ Modules:
+ - na_santricity_alerts: Manage email alert notification settings
+ - na_santricity_alerts_syslog: Manage syslog servers receiving storage system alerts
+ - na_santricity_asup: Manage auto-support settings
+ - na_santricity_auditlog: Manage audit-log configuration
+ - na_santricity_auth: Set or update the password for a storage array
+ - na_santricity_client_certificate: Manage remote server certificates
+ - na_santricity_server_certificate: Manage storage system certificates
+ - na_santricity_discover: Discover E-Series storage systems on a subnet
+ - na_santricity_drive_firmware: Manage drive firmware
+ - na_santricity_facts: Retrieve facts about NetApp E-Series storage arrays
+ - na_santricity_firmware: Manage firmware
+ - na_santricity_global: Manage global settings configuration
+ - na_santricity_host: Manage eseries hosts
+ - na_santricity_hostgroup: Manage array host groups
+ - na_santricity_ib_iser_interface: Manage InfiniBand iSER interfaces.
+ - na_santricity_iscsi_interface: Manage iSCSI interface configuration
+ - na_santricity_iscsi_target: Manage iSCSI target configuration
+ - na_santricity_ldap: Manage LDAP integration to use for authentication
+ - na_santricity_lun_mapping: Manage lun mappings
+ - na_santricity_mgmt_interface: Manage management interface configuration
+ - na_santricity_nvme_interface: Manage NVMe interfaces
+ - na_santricity_proxy_drive_firmware_upload: Manage proxy drive firmware cache.
+ - na_santricity_proxy_firmware_upload: Manage proxy storage system firmware cache.
+ - na_santricity_proxy_systems: Manage proxy storage systems.
+ - na_santricity_snapshot: Manage Snapshot groups, volumes, and rollbacks.
+ - na_santricity_storage_system: Manage SANtricity web services proxy storage arrays
+ - na_santricity_storagepool: Manage volume groups and disk pools
+ - na_santricity_syslog: Manage syslog settings
+ - na_santricity_volume: Manage storage volumes
+
+ *** Note that the following deprecated modules will be removed in a future release.
+ Deprecated Modules:
+ - netapp_e_alerts: Manage email notification settings
+ - netapp_e_amg: Create, remove, and update asynchronous mirror groups
+ - netapp_e_amg_role: Update the role of a storage array within an Asynchronous Mirror Group (AMG)
+ - netapp_e_amg_sync: Conduct synchronization actions on asynchronous mirror groups
+ - netapp_e_asup: Manage auto-support settings
+ - netapp_e_auditlog: Manage audit-log configuration
+ - netapp_e_auth: Set or update the password for a storage array
+ - netapp_e_drive_firmware: Manage drive firmware
+ - netapp_e_facts: Retrieve facts about NetApp E-Series storage arrays
+ - netapp_e_firmware: Manage firmware
+ - netapp_e_flashcache: Manage SSD caches
+ - netapp_e_global: Manage global settings configuration
+    - netapp_e_hostgroup: Manage array host groups
+    - netapp_e_host: Manage eseries hosts
+ - netapp_e_iscsi_interface: Manage iSCSI interface configuration
+ - netapp_e_iscsi_target: Manage iSCSI target configuration
+ - netapp_e_ldap: Manage LDAP integration to use for authentication
+ - netapp_e_lun_mapping: Create, delete, or modify lun mappings
+ - netapp_e_mgmt_interface: Manage management interface configuration
+ - netapp_e_snapshot_group: Manage snapshot groups
+ - netapp_e_snapshot_images: Create and delete snapshot images
+ - netapp_e_snapshot_volume: Manage snapshot volumes
+ - netapp_e_storagepool: Manage volume groups and disk pools
+    - netapp_e_storage_system: Manage Web Services Proxy managed storage arrays
+ - netapp_e_syslog: Manage syslog settings
+ - netapp_e_volume_copy: Create volume copy pairs
+ - netapp_e_volume: Manage storage volumes (standard and thin)
+
+Requirements
+------------
+    - NetApp E-Series E2800 platform or newer, or NetApp E-Series SANtricity Web Services Proxy configured for older E-Series storage arrays.
+    - Python cryptography >= 2.5 package is required for the na_santricity_server_certificate module and the nar_santricity_management role.
+ - Python ipaddress and netaddr packages are required for na_santricity_discover, na_santricity_ and nar_santricity_common role.
+
+Tested Ansible Versions
+-----------------------
+ - Ansible 5.x (ansible-core 2.12)
+
+Example Playbook
+----------------
+ - hosts: eseries_storage_systems
+ gather_facts: false
+ collections:
+ - netapp_eseries.santricity
+ tasks:
+ - name: Ensure all management related policies are enforced.
+ import_role:
+ name: nar_santricity_management
+ - name: Ensure all host related policies are enforced.
+ import_role:
+ name: nar_santricity_host
+
+Example Storage System Inventory File (Simple example without storage system discovery)
+-------------------------------------
+ eseries_system_api_url: https://192.168.1.200:8443/devmgr/v2/
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+
+ eseries_system_name: my_eseries_array
+
+ eseries_management_interfaces:
+ config_method: static
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ dns_config_method: static
+ dns_address: 192.168.1.253
+ ntp_config_method: static
+ ntp_address: 192.168.1.200
+ controller_a:
+ - address: 192.168.1.100
+ - address: 192.168.1.101
+ controller_b:
+ - address: 192.168.1.102
+ - address: 192.168.1.103
+
+ eseries_initiator_protocol: fc
+
+ # Storage pool and volume configuration
+ eseries_storage_pool_configuration:
+ - name: vg_01
+ raid_level: raid6
+ criteria_drive_count: 10
+ volumes:
+ - name: metadata
+ host: servers
+ size: 4096
+ - name: vg_02
+ raid_level: raid5
+ criteria_drive_count: 10
+ volumes:
+ - name: storage_[1-3]
+ host: servers
+ size: 2
+ size_unit: tb
+ volume_metadata: # Used by netapp_eseries.host.mount role to format and mount volumes
+ format_type: xfs
+ mount_opt: "noatime,nodiratime,logbufs=8,logbsize=256k,largeio,inode64,swalloc,allocsize=131072k,nobarrier,_netdev"
+ mount_dir: "/data/beegfs/"
+
+Example Storage System Inventory File (Discover storage system)
+-------------------------------------
+**Note that this discovery method works for SANtricity versions 11.62 or later; otherwise, it will only discover systems with unset passwords.**
+
+ eseries_system_serial: 012345678901
+ eseries_system_password: admin_password
+ eseries_subnet: 192.168.1.0/24
+
+    eseries_management_interfaces:      # (Optional) Static management interfaces can be used not only to discover the storage system but also to contact it when valid.
+ config_method: static
+ controller_a:
+ - address: 192.168.1.100
+ controller_b:
+ - address: 192.168.1.101
+
+ (...) # Same as the previous examples starting with the eseries_validate_certs line
+
+Example Storage System Inventory File (Discover storage system with proxy)
+-------------------------------------
+ eseries_system_serial: 012345678901
+ eseries_system_password: admin_password
+
+ eseries_proxy_api_url: https://192.168.1.100:8443/devmgr/v2/
+ eseries_proxy_api_password: admin_password
+ eseries_subnet: 192.168.1.0/24
+    eseries_prefer_embedded: false   # Overrides the default behavior of using Web Services Proxy when eseries_proxy_api_url is defined. This will only affect
+ # storage systems that have Embedded Web Services.
+
+ (...) # Same as the previous examples starting with the eseries_validate_certs line
+
+Example Storage System Inventory File
+-------------------------------------
+ eseries_system_api_url: https://192.168.1.200:8443/devmgr/v2/
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+
+ eseries_system_name: my_eseries_array
+ eseries_system_cache_block_size: 128
+ eseries_system_cache_flush_threshold: 90
+ eseries_system_autoload_balance: enabled
+ eseries_system_host_connectivity_reporting: enabled
+ eseries_system_default_host_type: Linux DM-MP
+
+ eseries_management_interfaces:
+ config_method: static
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ dns_config_method: static
+ dns_address: 192.168.1.253
+ dns_address_backup: 192.168.1.254
+ ssh: true
+ ntp_config_method: static
+ ntp_address: 192.168.1.200
+ ntp_address_backup: 192.168.1.201
+ controller_a:
+ - address: 192.168.1.100
+ - address: 192.168.1.101
+ controller_b:
+ - address: 192.168.1.102
+ - address: 192.168.1.103
+
+ eseries_ldap_state: present
+ eseries_ldap_bind_username:
+ eseries_ldap_bind_password:
+ eseries_ldap_server:
+ eseries_ldap_search_base:
+ eseries_ldap_role_mappings:
+ ".*":
+ - storage.admin
+ - storage.monitor
+ - support.admin
+ - security.admin
+
+ eseries_client_certificate_certificates:
+ - /path/to/client_certificate.pem
+ eseries_server_certificate:
+ controller_a:
+ certificate:
+ - "/path/to/controller_a_server_certificate_bundle.pem"
+ controller_b:
+ passphrase: keypass
+ certificate:
+ - "/path/to/controller_b_server_certificate_bundle.pem"
+
+ eseries_firmware_firmware: "/path/to/firmware.dlp"
+ eseries_firmware_nvsram: "/path/to/nvsram.dlp"
+ eseries_drive_firmware_firmware_list:
+ - "/path/to/drive_firmware.dlp"
+
+ eseries_asup_state: enabled
+ eseries_asup_active: true
+ eseries_asup_days: [sunday, saturday]
+ eseries_asup_start: 17
+ eseries_asup_end: 24
+ eseries_asup_validate: false
+ eseries_asup_method: email
+ eseries_asup_email:
+ server: smtp.example.com
+ sender: noreply@example.com
+
+ eseries_syslog_state: present
+ eseries_syslog_address: 192.168.1.150
+ eseries_syslog_protocol: udp
+ eseries_syslog_port: 514
+ eseries_alert_syslog_servers:
+ - "address": 192.168.1.150
+ "port": 514
+ eseries_initiator_protocol: iscsi
+
+ # Controller port definitions
+ eseries_controller_iscsi_port_config_method: static
+ eseries_controller_iscsi_port_subnet_mask: 255.255.255.0
+ eseries_controller_iscsi_port:
+ controller_a:
+ - address: 192.168.2.100
+ - address: 192.168.2.110
+ controller_b:
+ - address: 192.168.3.100
+ - address: 192.168.3.110
+
+ # Storage pool and volume configuration
+ eseries_storage_pool_configuration:
+ - name: pool[1-2]
+ raid_level: raid6
+ criteria_drive_count: 10
+ volumes:
+ - name: "[pool]_volume[A-C]"
+ host: server_group
+ size: 10
+ size_unit: tb
+
+Storage System Credentials
+--------------------------
+ Proxy:
+ - The proxy's credentials should be defined at the group inventory level (eseries_proxy_api_url, eseries_proxy_api_password, eseries_subnet) and each storage system host inventory file should include its password (eseries_system_password) and the system's serial number (eseries_system_serial).
+ - You can substitute a unique proxy storage system identifier (eseries_proxy_ssid) for the system's serial number if the system has already been added to the proxy. However, using the storage system serial number is the preferred method since it will never change and is always known to the system. If you would like to be able to reference the system from the proxy using a unique identifier of your choice then add it in addition to the system's serial number.
+   - If your storage system has never been added to the Web Services Proxy, utilizing your system's serial number (eseries_system_serial) ensures that the SANtricity collection can discover it and set its initial admin password if not previously done.
+
+ Direct:
+ - Provide the storage system's serial number, password and the network subnet (eseries_system_serial, eseries_system_password, and eseries_subnet) to ensure that it will be discoverable; this information will also ensure the admin password has been set. The discovery process can be lengthy depending on the size and speed of the subnet. This can be mitigated by configuring static management interfaces (eseries_management_interfaces) otherwise each playbook execution will search for the DHCP assigned address.
+   - If the storage system's Web Services Embedded API URL (eseries_system_api_url) is known, then it can be used in place of the system's serial number.
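+
+ As a minimal sketch, the proxy credentials might live at the group level while each storage system host inventory carries only its own serial number and password (the file names and values below are illustrative only):
+
+    # group_vars/eseries_storage_systems.yml (hypothetical file name)
+    eseries_proxy_api_url: https://192.168.1.100:8443/devmgr/v2/
+    eseries_proxy_api_password: proxy_admin_password
+    eseries_subnet: 192.168.1.0/24
+
+    # host_vars/array_01.yml (hypothetical file name)
+    eseries_system_serial: 012345678901
+    eseries_system_password: admin_password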
+
+Snapshots
+---------
+  - Automate snapshot consistency groups with one or more base volumes. Consistency groups allow snapshot images to be taken consistently across multiple base volumes at a single point in time. Then use these images to either create snapshot volumes or roll back the base volumes to previous states. Snapshot volumes allow you to interact with the volumes as they were, in either read-only or read-write mode.
+  - Since E-Series storage systems are block storage and are unaware of both the file system and the application, it is important to prepare volumes for snapshots. This requires applications to complete anything necessary to place the data in a valid state and file systems to complete and sync data to storage. To help facilitate these actions, check out the snapshot role in the netapp_eseries.host collection (https://galaxy.ansible.com/netapp_eseries/host). This snapshot role ensures mounted E-Series volumes across multiple hosts have files closed, synced cache, and are temporarily unmounted prior to taking snapshots. Want to take a point-in-time snapshot of a parallel file system? The snapshot role also ensures that volumes are ready across multiple storage systems.
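+
+ As a minimal sketch (assuming the eseries_snapshot_groups variables described under `Collection Variables` below), a snapshot consistency group could be declared in the inventory as follows; the group name and values are illustrative only, and the remaining options, including the group's base-volume membership, are covered in that section:
+
+    eseries_snapshot_groups:
+      - name: example_consistency_group
+        maximum_snapshots: 32
+        reserve_capacity_pct: 40
+        alert_threshold_pct: 75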
+
+General Notes
+-------------
+  - The EF600/300 platforms currently do not distribute drives across their PCI bridges, which can result in lower than expected performance when drives become saturated. Be sure to use either eseries_storage_pool_usable_drives to specify the order in which drives should be selected, or the eseries_storage_pool_configuration usable_drives argument to specify which drives should be used. See details in the `Collection Variables` section. The example below will ensure the drive candidate selections for EF600/300 storage pools or volume groups are selected by alternating between the first and last twelve drives; a fuller inventory sketch follows. You can also specify tray drawers in the form <tray>:<drawer>:<slot>.
+ `eseries_storage_pool_usable_drives: "99:0,99:23,99:1,99:22,99:2,99:21,99:3,99:20,99:4,99:19,99:5,99:18,99:6,99:17,99:7,99:16,99:8,99:15,99:9,99:14,99:10,99:13,99:11,99:12"`
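+
+ A minimal sketch of both approaches is shown below; the pool name, drive count, and drive identifiers are illustrative only, and usable_drives is assumed to accept the same comma-separated <tray>:<slot> form as the default above:
+
+    # Collection-wide drive ordering default
+    eseries_storage_pool_usable_drives: "99:0,99:23,99:1,99:22,99:2,99:21,99:3,99:20"
+
+    # Per-pool drive selection
+    eseries_storage_pool_configuration:
+      - name: pool_1
+        raid_level: raid6
+        criteria_drive_count: 8
+        usable_drives: "99:0,99:23,99:1,99:22,99:2,99:21,99:3,99:20"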
+
+Collection Variables
+--------------------
+**Note that when values are specified below, they indicate the default value.**
+
+ eseries_subnet: # Network subnet to search for the storage system specified in CIDR form. Example: 192.168.1.0/24
+ eseries_template_api_url: # Template for the web services api url. Default: https://0.0.0.0:8443/devmgr/v2/
+    eseries_prefer_embedded: false     # Overrides the default behavior of using Web Services Proxy when eseries_proxy_api_url is defined. This will only affect
+ # storage systems that have Embedded Web Services.
+    eseries_validate_certs: true       # Indicates whether SSL certificates should be verified. Used for both embedded and proxy. Choices: true, false
+
+ # Storage system specific variables
+ eseries_proxy_ssid: # Arbitrary string for the proxy to represent the storage system. eseries_system_serial will be used when not defined.
+ eseries_system_serial: # Storage system serial number. (This is located on a label at the top-left towards the front on the device)
+    eseries_system_addresses:          # Storage system management IP addresses. Only required when neither eseries_system_serial nor eseries_system_api_url is
+                                       #    defined. When not specified, addresses will be populated with eseries_management_interfaces controller addresses.
+    eseries_system_api_url:            # URL for the storage system's embedded web services REST API. Example: https://192.168.10.100/devmgr/v2
+    eseries_system_username: admin     # Username for the storage system's embedded web services REST API
+    eseries_system_password:           # Password for the storage system's embedded web services REST API. When the admin password has not been set,
+                                       #    eseries_system_password will be used to set it.
+ eseries_system_old_password: # Previous admin password. This is used to change the current admin password by setting this variable to the current
+ # password and eseries_system_password to the new password.
+ eseries_system_tags: # Meta tags to associate with storage system when added to the proxy.
+
+ # Web Services Proxy specific variable
+ Note: eseries_proxy_* variables are required to discover storage systems prior to SANtricity OS version 11.60.2.
+    eseries_proxy_api_url:             # URL for the storage system's proxy web services REST API. Example: https://192.168.10.100/devmgr/v2
+    eseries_proxy_api_username: admin  # Username for the storage system's proxy web services REST API.
+    eseries_proxy_api_password:        # Password for the storage system's proxy web services REST API. When the admin password has
+                                       #    not been set, eseries_proxy_api_password will be used to set it.
+ eseries_proxy_api_old_password: # Previous proxy admin password. This is used to change the current admin password by setting this
+ # variable to the current proxy password and eseries_proxy_api_password to the new password.
+ eseries_proxy_monitor_password: # Proxy password for the monitor username
+ eseries_proxy_security_password: # Proxy password for the security username
+    eseries_proxy_storage_password:    # Proxy password for the storage username
+ eseries_proxy_support_password: # Proxy password for the support username
+    eseries_proxy_accept_certifications:  # Force automatic acceptance of all storage systems' certificates
+ eseries_proxy_default_system_tags: # Default meta tags to associate with all storage systems
+ eseries_proxy_default_password: # Default password to associate with all storage systems. This is overridden by eseries_system_password.
+ eseries_proxy_client_certificate_common_certificates: # List of common proxy client certificate file paths. These files will be appended to each client certificate list.
+ eseries_proxy_client_certificate_certificates: # List of proxy client certificate file paths
+ eseries_proxy_server_certificate_common_certificates: # List of common proxy server certificates. These files will be appended to each controller's server certificate list.
+ eseries_proxy_server_certificate_common_passphrase: # Common passphrase for decrypting PEM (PKCS8) private key.
+ eseries_proxy_server_certificate_certificates: # List of proxy server certificates. Leave blank to use self-signed certificate.
+ eseries_proxy_server_certificate_passphrase: # Passphrase for decrypting PEM (PKCS8) private key.
+
+ # LDAP configuration defaults
+    eseries_proxy_ldap_state:          # Whether LDAP should be configured for the proxy
+ eseries_proxy_ldap_identifier: # The user attributes that should be considered for the group to role mapping
+    eseries_proxy_ldap_user_attribute: # Attribute used to match the provided username during authentication.
+ eseries_proxy_ldap_bind_username: # User account that will be used for querying the LDAP server.
+ eseries_proxy_ldap_bind_password: # Password for the bind user account
+ eseries_proxy_ldap_server: # LDAP server URL.
+    eseries_proxy_ldap_search_base:    # Search base used to find the user's group membership
+ eseries_proxy_ldap_role_mappings: # Dictionary of user groups, each containing the list of access roles.
+ # Role choices: storage.admin - allows users full read/writes access to storage objects and operations.
+ # storage.monitor - allows users read-only access to storage objects and operations.
+                                       #                 support.admin - allows users access to hardware, diagnostic information, major event logs,
+                                       #                     and other critical support-related functionality, but not the storage configuration.
+                                       #                 security.admin - allows users access to authentication/authorization configuration, as
+                                       #                     well as the audit log configuration, and certificate management.
+
+ # Global storage system information
+ eseries_system_name: # Name of the storage system.
+ eseries_system_cache_block_size: # Cache block size
+    eseries_system_cache_flush_threshold:       # Unwritten data will be flushed when it exceeds this threshold
+ eseries_system_autoload_balance: # Whether automatic load balancing should be enabled. Choices: enabled, disabled
+ eseries_system_host_connectivity_reporting: # Whether host connectivity reporting should be enabled. Choices: enabled, disabled
+ eseries_system_login_banner_message: # Message that appears prior to the login.
+ eseries_system_default_host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or the storage system default host type is incorrect.
+ # Common host type definitions:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+                                                 #    - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
+
+ # Role-based username passwords
+ eseries_system_monitor_password: # Storage system monitor username password
+ eseries_system_security_password: # Storage system security username password
+ eseries_system_storage_password: # Storage system storage username password
+ eseries_system_support_password: # Storage system support username password
+
+ # Storage management interface defaults
+ Note: eseries_management_* variables have the lowest priority and will be overwritten by those found in eseries_management_interfaces
+ eseries_management_config_method: # Default config method for all management interfaces. Choices: static, dhcp
+ eseries_management_subnet_mask: # Default subnet mask for all management interfaces
+ eseries_management_gateway: # Default gateway for all management interfaces
+ eseries_management_dns_config_method: # Default DNS config method for all management interfaces
+ eseries_management_dns_address: # Default primary DNS address for all management interfaces
+ eseries_management_dns_address_backup: # Default backup DNS address for all management interfaces
+ eseries_management_ntp_config_method: # Default NTP config method for all management interfaces
+ eseries_management_ntp_address: # Default primary NTP address for all management interfaces
+ eseries_management_ntp_address_backup: # Default backup NTP address for all management interfaces
+ eseries_management_ssh: # Default SSH access for all management interfaces. Choices: true, false
+ eseries_management_interfaces:
+ config_method: # Config method for all management interfaces. Choices: static, dhcp
+ subnet_mask: # Subnet mask for all management interfaces
+      gateway:                 # Gateway for all management interfaces
+ dns_config_method: # DNS config method for all management interfaces
+ dns_address: # Primary DNS address for all management interfaces
+ dns_address_backup: # Backup DNS address for all management interfaces
+ ntp_config_method: # NTP config method for all management interfaces
+ ntp_address: # Primary NTP address for all management interfaces
+ ntp_address_backup: # Backup NTP address for all management interfaces
+ ssh: # SSH access for all management interfaces. Choices: true, false
+ controller_a: # List of controller A ports
+ - address: # IPv4 address for controller A
+ config_method: # Config method for controller A. Choices: static, dhcp
+ subnet_mask: # Subnet mask for controller A
+ gateway: # Gateway for controller A
+ dns_config_method: # DNS config method for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ dns_address: # Primary DNS address for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ dns_address_backup: # Backup DNS address for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ ntp_config_method: # NTP config method for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ ntp_address: # Primary NTP address for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ ntp_address_backup: # Backup NTP address for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ ssh: # SSH access for controller A (Note this will apply to every interface of the
+ # controller. To be idempotent, only define for one interface on this controller).
+ # Choices: true, false
+ controller_b: # List of controller B ports
+ - (...) # Same as for controller A but for controller B.
+
+ # Alerts configuration defaults
+ eseries_alerts_state: # Whether to enable storage system alerts. Choices: enabled, disabled
+ eseries_alerts_contact: # This allows owner to specify free-form contact information such as email or phone number.
+ eseries_alerts_recipients: # List containing e-mails that should be sent notifications when alerts are issued.
+ eseries_alerts_sender: # Sender email. This does not necessarily need to be a valid e-mail.
+ eseries_alerts_server: # Fully qualified domain name, IPv4 address, or IPv6 address of the mail server.
+ eseries_alerts_test: false # When changes are made to the storage system alert configuration a test e-mail will be sent. Choices: true, false
+ eseries_alert_syslog_servers: # List of dictionaries where each dictionary contains a syslog server entry. [{"address": <address>, "port": <port>}]
+    eseries_alert_syslog_test: false   # Whether a test message should be sent to the configured alert syslog servers. Choices: true, false
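+
+    Note: As a minimal sketch, an e-mail alert configuration using the variables above might look like the following (the server and addresses are illustrative only):
+        eseries_alerts_state: enabled
+        eseries_alerts_contact: "Storage administration team"
+        eseries_alerts_recipients:
+          - admin@example.com
+        eseries_alerts_sender: noreply@example.com
+        eseries_alerts_server: smtp.example.com
+        eseries_alerts_test: false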
+
+ # LDAP configuration defaults
+ eseries_ldap_state: # Whether LDAP should be configured
+ eseries_ldap_identifier: # The user attributes that should be considered for the group to role mapping
+    eseries_ldap_user_attribute:         # Attribute used to match the provided username during authentication.
+ eseries_ldap_bind_username: # User account that will be used for querying the LDAP server.
+ eseries_ldap_bind_password: # Password for the bind user account
+ eseries_ldap_server: # LDAP server URL.
+    eseries_ldap_search_base:            # Search base used to find the user's group membership
+ eseries_ldap_role_mappings: # Dictionary of user groups, each containing the list of access roles.
+ # Role choices: storage.admin - allows users full read/writes access to storage objects and operations.
+ # storage.monitor - allows users read-only access to storage objects and operations.
+                                         #                 support.admin - allows users access to hardware, diagnostic information, major event logs,
+                                         #                     and other critical support-related functionality, but not the storage configuration.
+                                         #                 security.admin - allows users access to authentication/authorization configuration, as
+                                         #                     well as the audit log configuration, and certificate management.
+
+ # SSL/TLS certificate configurations - Note that both individual certificates or wildcard certificates are accepted.
+ eseries_client_certificate_remove_unspecified_user_certificates: # Whether existing user certificates should be automatically removed. (Default: True)
+ eseries_client_certificate_common_certificates: # List of common client certificate file paths. These files will be appended to each client certificate list.
+ eseries_client_certificate_certificates: # List of client certificate file paths
+ eseries_server_certificate_common_certificates: # List of common server certificates. These files will be appended to each controller's server certificate list.
+ eseries_server_certificate_common_passphrase: # Common passphrase for decrypting PEM (PKCS8) private key.
+ eseries_server_certificate:
+ controller_a:
+ certificates: # List of server certificates for the storage systems controller A. Leave blank to use self-signed certificate.
+ passphrase: # Passphrase for decrypting PEM (PKCS8) private key.
+ controller_b:
+ certificates: # List of server certificates for the storage systems controller B. Leave blank to use self-signed certificate.
+ passphrase: # Passphrase for decrypting PEM (PKCS8) private key.
+
+ # Drive firmware defaults
+ eseries_drive_firmware_firmware_list: # Local path list for drive firmware.
+ eseries_drive_firmware_wait_for_completion: # Forces drive firmware upgrades to wait for all associated tasks to complete. Choices: true, false
+ eseries_drive_firmware_ignore_inaccessible_drives: # Forces drive firmware upgrades to ignore any inaccessible drives. Choices: true, false
+ eseries_drive_firmware_upgrade_drives_online: # Forces drive firmware upgrades to be performed while I/Os are accepted. Choices: true, false
+
+ # Controller firmware defaults
+ eseries_firmware_nvsram: # Local path for NVSRAM file.
+ eseries_firmware_firmware: # Local path for controller firmware file.
+ eseries_firmware_wait_for_completion: # Forces controller firmware upgrade to wait until upgrade has completed before continuing. Choices: true, false
+ eseries_firmware_clear_mel_events: # Forces firmware upgrade to be attempted regardless of the health check results. Choices: true, false
+
+ # Auto-Support configuration defaults
+ eseries_asup_state: # Whether auto support (ASUP) should be enabled. Choices: enabled, disabled
+ eseries_asup_active: # Allows NetApp support personnel to request support data to resolve issues. Choices: true, false
+ eseries_asup_days: # List of days of the week. Choices: monday, tuesday, wednesday, thursday, friday, saturday, sunday
+ eseries_asup_start: # Hour of the day(s) to start ASUP bundle transmissions. Start time must be less than end time. Choices: 0-23
+ eseries_asup_end: # Hour of the day(s) to end ASUP bundle transmissions. Start time must be less than end time. Choices: 1-24
+ eseries_asup_method: # ASUP delivery method. Choices https, http, email (default: https)
+ eseries_asup_routing_type: # ASUP delivery routing type for https or http. Choices: direct, proxy, script (default: direct)
+ eseries_asup_proxy: # ASUP proxy delivery method information.
+ host: # ASUP proxy host IP address or FQDN. When eseries_asup_routing_type==proxy this must be specified.
+ port: # ASUP proxy host port. When eseries_asup_routing_type==proxy this must be specified.
+ username: # ASUP proxy username.
+ password: # ASUP proxy password.
+ script: # ASUP proxy host script.
+ eseries_asup_email: # ASUP email delivery configuration information
+ server: # ASUP email server
+ sender: # ASUP email sender
+ test_recipient: # ASUP configuration mail test recipient
+ eseries_maintenance_duration: # Duration in hours (1-72) the ASUP maintenance mode will be active
+ eseries_maintenance_emails: # List of email addresses for maintenance notifications
+ eseries_asup_validate: # Verify ASUP configuration prior to applying changes
+
+ # Audit-log configuration defaults
+ eseries_auditlog_enforce_policy: # Whether to make audit-log policy changes. Choices: true, false
+ eseries_auditlog_force: # Forces audit-log to delete log messages when fullness threshold has been exceeded. Choices: true, false
+ eseries_auditlog_full_policy: # Policy for what to do when record limit has been reached. Choices: overWrite, preventSystemAccess
+ eseries_auditlog_log_level: # Filters logs based on the specified level. Choices: all, writeOnly
+ eseries_auditlog_max_records: # Maximum number of audit-log messages retained. Choices: 100-50000.
+ eseries_auditlog_threshold: # Memory full percentage threshold that audit-log will start issuing warning messages. Choices: 60-90
+
+ # Syslog configuration defaults
+ eseries_syslog_state: # Whether syslog servers should be added or removed from storage system. Choices: present, absent
+ eseries_syslog_address: # Syslog server IPv4 address or fully qualified hostname.
+    eseries_syslog_test:               # Whether a test message should be sent to the syslog server when added to the storage system. Choices: true, false
+    eseries_syslog_protocol:           # Protocol to be used when transmitting log messages to syslog server. Choices: udp, tcp, tls
+ eseries_syslog_port: # Port to be used when transmitting log messages to syslog server.
+    eseries_syslog_components:         # List of components to log to the syslog server. Choices: auditLog, (others may become available)
+
+ # iSCSI target discovery specifications
+    Note: add the following to the ansible-playbook command to update the chap secret: --extra-vars "eseries_target_chap_secret_update=True"
+ eseries_iscsi_target_name: # iSCSI target name that will be seen by the initiator
+ eseries_iscsi_target_ping: True # Enables ICMP ping response from the configured iSCSI ports (boolean)
+ eseries_iscsi_target_unnamed_discovery: True # Whether the iSCSI target iqn should be returned when an initiator performs a discovery session.
+ eseries_iscsi_target_chap_secret: # iSCSI chap secret. When left blank, the chap secret will be removed from the storage system.
+    eseries_iscsi_target_chap_secret_update: False   # DO NOT REMOVE! Since na_santricity_iscsi_target cannot compare the chap secret with the current one and
+                                                      #    will always return changed=True, this flag is used to force the module to update the chap secret.
+                                                      #    Leave this value False and add --extra-vars "eseries_target_chap_secret_update=True" when an update is needed.
+
+ # Controller iSCSI Interface Port Default Policy Specifications
+    eseries_controller_iscsi_port_state: enabled         # Generally specifies whether a controller port definition should be applied. Choices: enabled, disabled
+ eseries_controller_iscsi_port_config_method: dhcp # General port configuration method definition for both controllers. Choices: static, dhcp
+ eseries_controller_iscsi_port_gateway: # General port IPv4 gateway for both controllers.
+ eseries_controller_iscsi_port_subnet_mask: # General port IPv4 subnet mask for both controllers.
+ eseries_controller_iscsi_port_mtu: 9000 # General port maximum transfer units (MTU) for both controllers. Any value greater than 1500 (bytes).
+ eseries_controller_iscsi_port:
+ controller_a: # Controller A port definition. Ordered list of port definitions reading iSCSI ports left to right
+ - state: # Whether the port should be enabled. Choices: enabled, disabled
+ config_method: # Port configuration method Choices: static, dhcp
+ address: # Port IPv4 address
+ gateway: # Port IPv4 gateway
+ subnet_mask: # Port IPv4 subnet_mask
+ mtu: # Port IPv4 mtu
+ controller_b: # Controller B port definition.
+ - (...) # Same as controller A but for controller B
+
+ # Controller InfiniBand iSER Interface Channel
+ eseries_controller_ib_iser_port:
+ controller_a: # Ordered list of controller A channel address definition.
+ - # Port IPv4 address for channel 1
+ controller_b: # Ordered list of controller B channel address definition.
+ - (...) # Same as controller A but for controller B
+
+ # Controller NVMe over InfiniBand Interface Channel
+ eseries_controller_nvme_ib_port:
+ controller_a: # Ordered list of controller A channel address definition.
+ - # Port IPv4 address for channel 1
+ controller_b: # Ordered list of controller B channel address definition.
+ - (...) # Same as controller A but for controller B
+
+ # Controller NVMe RoCE Interface Port Default Policy Specifications
+ eseries_controller_nvme_roce_port_state: enabled # Specifies whether a controller port definition should be applied. Choices: enabled, disabled
+ eseries_controller_nvme_roce_port_config_method: dhcp # Port configuration method definition for both controllers. Choices: static, dhcp
+ eseries_controller_nvme_roce_port_gateway: # Port IPv4 gateway for both controllers.
+ eseries_controller_nvme_roce_port_subnet_mask: # Port IPv4 subnet mask for both controllers.
+ eseries_controller_nvme_roce_port_mtu: 4200 # Port maximum transfer units (MTU). Any value greater than 1500 (bytes).
+ eseries_controller_nvme_roce_port_speed: auto # Interface speed. Value must be a supported speed or auto to negotiate the speed with the port.
+ eseries_controller_nvme_roce_port:
+ controller_a: # Controller A port definition. List containing ports definitions.
+ - channel: # Channel of the port to modify. This will be a numerical value that represents the port; typically read
+ # left to right on the HIC.
+ state: # Whether the port should be enabled.
+ config_method: # Port configuration method Choices: static, dhcp
+ address: # Port IPv4 address
+ gateway: # Port IPv4 gateway
+ subnet_mask: # Port IPv4 subnet_mask
+ mtu: # Port IPv4 mtu
+ speed: # Port IPv4 speed
+ controller_b: # Controller B port definition.
+ - (...) # Same as controller A but for controller B
+
+ # Storage Pool Default Policy Specifications
+ eseries_storage_pool_state: present # Default storage pool state. Choices: present, absent
+ eseries_storage_pool_raid_level: raidDiskPool # Default volume raid level. Choices: raid0, raid1, raid5, raid6, raidDiskPool
+ eseries_storage_pool_secure_pool: false # Default for storage pool drive security. This flag will enable the security at rest feature. There
+ # must be sufficient FDE or FIPS security capable drives. Choices: true, false
+ eseries_storage_pool_criteria_drive_count: # Default storage pool drive count.
+    eseries_storage_pool_reserve_drive_count:          # Default reserve drive count for drive reconstruction for storage pools using a dynamic disk pool;
+                                                       #    the raid level must be set to raidDiskPool.
+ eseries_storage_pool_criteria_min_usable_capacity: # Default minimum required capacity for storage pools.
+ eseries_storage_pool_criteria_drive_type: # Default drive type for storage pools. Choices: hdd, ssd
+ eseries_storage_pool_criteria_drive_interface_type: # Default interface type to use when selecting drives for the storage pool.
+ eseries_storage_pool_criteria_size_unit: gb # Default unit size for all storage pool related sizing.
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb
+ eseries_storage_pool_criteria_drive_min_size: # Default minimum drive size for storage pools.
+ eseries_storage_pool_criteria_drive_max_size: # Default maximum drive size for storage pools.
+ eseries_storage_pool_criteria_drive_require_da: # Default for whether storage pools are required to have data assurance (DA) compatible drives.
+ # Choices: true, false
+ eseries_storage_pool_criteria_drive_require_fde: # Default for whether storage pools are required to have drive security compatible drives.
+ # Choices: true, false
+ eseries_storage_pool_remove_volumes: # Default policy for deleting volumes prior to removing storage pools.
+    eseries_storage_pool_erase_secured_drives:         # Default policy for erasing the content of drives during create and delete storage pool operations.
+ # Choices: true, false
+ eseries_storage_pool_ddp_critical_threshold_pct: # Default policy for dynamic disk pool alert critical threshold.
+ eseries_storage_pool_ddp_warning_threshold_pct: # Default policy for dynamic disk pool alert warning threshold.
+
+ # Volume Default Policy Specifications
+ eseries_volume_state: present # Default volume state. Choices: present, absent
+ eseries_volume_size_unit: gb # Default unit size for all volume sizing options.
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb, pct
+ eseries_volume_size: # Default volume size or the presented size for thinly provisioned volumes.
+ eseries_volume_data_assurance_enabled: # Default for whether data assurance(DA) is required to be enabled.
+ eseries_volume_segment_size_kb: # Default segment size measured in kib.
+ eseries_volume_read_cache_enable: # Default for read caching which will cache all read requests.
+ eseries_volume_read_ahead_enable: # Default for read ahead caching; this is good for sequential workloads to cache subsequent blocks.
+ eseries_volume_write_cache_enable: # Default for write caching which will cache all writes.
+ eseries_volume_write_cache_mirror_enable: # Default for write cache mirroring which mirrors writes to both controller's cache.
+ eseries_volume_cache_without_batteries: # Default for allowing caching when batteries are not present.
+ eseries_volume_thin_provision: # Default for whether volumes should be thinly provisioned.
+ eseries_volume_thin_volume_repo_size: # Default for actually allocated space for thinly provisioned volumes.
+ eseries_volume_thin_volume_max_repo_size: # Default for the maximum allocated space allowed for thinly provisioned volumes.
+ eseries_volume_thin_volume_expansion_policy: # Default thin volume expansion policy. Choices: automatic, manual
+ eseries_volume_thin_volume_growth_alert_threshold: # Default thin volume growth alert threshold; this is the threshold for when the thin volume expansion
+ # policy will be enacted. Allowable values are between and including 10% and 99%
+ eseries_volume_ssd_cache_enabled: # Default for ssd cache which will enable the volume to use an existing SSD cache on the storage array
+ eseries_volume_host: # Default host for all volumes; the value can be any host from the Ansible inventory.
+ eseries_volume_workload_name: # Default workload tag name
+ eseries_volume_workload_metadata: # Default workload metadata
+ eseries_volume_volume_metadata: # Default volume_metadata
+    eseries_volume_owning_controller:                  # Default preferred owning controller
+    eseries_volume_wait_for_initialization: false      # Default for whether volume creation should wait for initialization to complete
+
+ # Storage Pool-Volume Mapping Default Policy Specifications
+ eseries_lun_mapping_state: present # Generally specifies whether a LUN mapping should be present. This is useful when adding a default host for all
+ # volumes. Choices: present, absent
+    eseries_lun_mapping_host:            # Default host for all volumes not specifically given a host either in common_volume_configuration or in
+ # eseries_storage_pool_configuration.
+
+ # Storage Pool-Volume Default Policy Specifications
+    Name schemes: Storage pool and volume names can be used to specify a naming scheme that produces a list of storage pools and volumes. Schemes are defined by
+                  brackets and can be used to specify a range of lowercase letters, uppercase letters, a range of single-digit numbers, any top-level inventory
+                  variable, or [pool] to use the currently defined storage pool (volume only). For example, name: vg_[1-3] produces vg_1, vg_2, and vg_3.
+ eseries_storage_pool_configuration:
+ - name: # Name or name scheme (see above) for the storage pool.
+ state: # Specifies whether the storage pool should exist. Choices: present, absent
+        raid_level:                   # Volume group raid level. Choices: raid0, raid1, raid5, raid6, raidDiskPool (Default: raidDiskPool)
+ secure_pool: # Default for storage pool drive security. This flag will enable the security at rest feature. There must be
+ # sufficient FDE or FIPS security capable drives. Choices: true, false
+ criteria_drive_count: # Default storage pool drive count.
+ criteria_volume_count: # Number of equally sized volumes to create. All available storage pool space will be used. The option will
+ # be ignored if volumes is defined.
+        criteria_reserve_free_capacity_pct:   # Percent of reserve free space capacity to leave when creating the criteria_volume_count volumes.
+        common_volume_host:           # Host or host group to which the criteria_volume_count volumes should be mapped.
+        reserve_drive_count:          # Default reserve drive count for drive reconstruction for storage pools using a dynamic disk pool; the raid
+                                      #    level must be set to raidDiskPool.
+ criteria_size_unit: # Default unit size for all storage pool related sizing. Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb
+ criteria_min_usable_capacity: # Default minimum required capacity for storage pools.
+ criteria_drive_type: # Default drive type for storage pools. Choices: hdd, ssd
+        criteria_drive_interface_type:  # Interface type to use when selecting drives for the storage pool.
+ # Choices: scsi, fibre, sata, pata, fibre520b, sas, sas4k, nvme4k
+ criteria_drive_min_size: # Default minimum drive size for storage pools.
+ criteria_drive_max_size: # Default maximum drive size for storage pools.
+ criteria_drive_require_da: # Ensures storage pools have data assurance (DA) compatible drives. Choices: true, false
+ criteria_drive_require_fde: # Ensures storage pools have drive security compatible drives. Choices: true, false
+ remove_volumes: # Ensures volumes are deleted prior to removing storage pools.
+ erase_secured_drives: # Ensures data is erased during create and delete storage pool operations. Choices: true, false
+ common_volume_configuration: # Any option that can be specified at the volume level can be generalized here at the storage pool level.
+        volumes:                      # List of volumes associated with the storage pool.
+ - state: # Specifies whether the volume should exist (present, absent)
+ name: # (required) Name or name scheme (see above) for the volume(s) to be created in the storage pool(s)
+          host:                       # Host or host group the volume should be mapped to.
+ host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or the storage system default host type is incorrect.
+ # Common host type definitions:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
+ owning_controller: # Specifies which controller will be the primary owner of the volume. Not specifying will allow the
+ # controller to choose ownership. (Choices: A, B)
+ size: # Size of the volume or presented size of the thinly provisioned volume.
+ size_unit: # Unit size for the size, thin_volume_repo_size, and thin_volume_max_repo_size
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb, pct
+ segment_size_kb: # Indicates the amount of data stored on a drive before moving on to the next drive in the volume group.
+ thin_provision: # Whether volumes should be thinly provisioned.
+ thin_volume_repo_size: # Actually allocated space for thinly provisioned volumes.
+ thin_volume_max_repo_size: # Maximum allocated space allowed for thinly provisioned volumes.
+ thin_volume_expansion_policy: # Thin volume expansion policy. Choices: automatic, manual
+ thin_volume_growth_alert_threshold: # Thin volume growth alert threshold; this is the threshold for when the thin volume expansion
+ # policy will be enacted. Allowable values are between and including 10% and 99%
+            ssd_cache_enabled: # Enables SSD caching, which allows the volume to use an existing SSD cache on the storage array.
+            data_assurance_enabled: # Specifies whether data assurance (DA) is required to be enabled.
+ read_cache_enable: # Enables read caching which will cache all read requests.
+ read_ahead_enable: # Enables read ahead caching; this is good for sequential workloads to cache subsequent blocks.
+ write_cache_enable: # Enables write caching which will cache all writes.
+            write_cache_mirror_enable: # Enables write cache mirroring, which mirrors writes to both controllers' caches.
+ cache_without_batteries: # Enable caching even without batteries.
+
+            wait_for_initialization: # Whether volume creation will wait for initialization to complete.
+ workload_name: # Name of the volume's workload
+ workload_metadata: # Dictionary containing arbitrary entries normally used for defining the volume(s) workload.
+            volume_metadata: # Dictionary containing arbitrary entries used to define information about the volume itself.
+ # Note: format_type, format_options, mount_dir, mount_options are used by netapp_eseries.host.mount role
+ # to format and mount volumes.
+
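+    # For illustration only, a minimal sketch of a single storage pool with one volume mapped to a host
+    # group from the Ansible inventory. The names ("example_pool", "example_volume", "servers") and the
+    # sizes are hypothetical placeholders, not defaults; the example is commented out so it is not applied
+    # verbatim.
+    # eseries_storage_pool_configuration:
+    #   - name: example_pool
+    #     raid_level: raidDiskPool
+    #     criteria_drive_count: 12
+    #     volumes:
+    #       - name: example_volume
+    #         host: servers
+    #         size: 500
+    #         size_unit: gb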
+
+ # Snapshot Consistency Group Default Policy Specifications
+ eseries_snapshot_remove_unspecified: # Whether to remove any snapshot group or view that is not specified (Default: false).
+ eseries_snapshot_groups_maximum_snapshots: # Default maximum point-in-time snapshot images (Default: 32).
+ eseries_snapshot_groups_reserve_capacity_pct: # Default reserve capacity percentage (Default: 40)
+ eseries_snapshot_groups_preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+ eseries_snapshot_groups_reserve_capacity_full_policy: # Default full reserve capacity policy (Default: purge). Choices: [purge, reject]
+ eseries_snapshot_groups_alert_threshold_pct: # Default reserve capacity percentage full to alert administrators (Default 75).
+ eseries_snapshot_groups:
+ - name: # Name of snapshot consistency group.
+ maximum_snapshots: # Maximum allowed snapshot point-in-time images for consistency group (Default: 32).
+ reserve_capacity_pct: # Reserve capacity measured as a percentage of the base volume (Default: 40). Reserve capacity can be expanded
+                              # and trimmed; however, the trim operation requires there be no base volume snapshot images in the group.
+ reserve_capacity_full_policy: # Policy to implement when reserve capacity is full (Default: purge). Choices [purge, reject]
+ alert_threshold_pct: # Reserve capacity full alert threshold for storage system administrators (Default: 75).
+ rollback_priority: # Storage system priority for base volume rollback (Default: medium). Choices [lowest, low, medium, high, highest]
+ volumes: # Information for each volume in the consistency group.
+ - volume: # Base volume name
+ reserve_capacity_pct: # Reserve capacity measured as a percentage of the base volume (Default: 40). Reserve capacity can be expanded
+                                  # and trimmed; however, the trim operation requires there be no base volume snapshot images in the group.
+ preferred_reserve_storage_pool: # Preferred reserve capacity storage pool or volume group. This will default to the base volume's
+ # storage pool or volume group. The reserve capacity volume cannot be changed once created.
+ - (...)
+
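+    # For illustration only, a hypothetical consistency group covering two existing base volumes; the
+    # group and volume names are placeholders and the values shown are simply the documented defaults,
+    # except for the larger reserve capacity on the first volume.
+    # eseries_snapshot_groups:
+    #   - name: example_snapshot_group
+    #     maximum_snapshots: 32
+    #     alert_threshold_pct: 75
+    #     volumes:
+    #       - volume: example_volume
+    #         reserve_capacity_pct: 60
+    #       - volume: example_volume_2
+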
+ # Snapshot Consistency Group View Default Policy Specifications
+ eseries_snapshot_views_host: # Default host or host group to map all snapshot volumes.
+ eseries_snapshot_views_reserve_capacity_pct: # Default reserve capacity percentage (Default: 40)
+ eseries_snapshot_views_preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+ eseries_snapshot_views_alert_threshold_pct: # Default reserve capacity percentage full to alert administrators (Default 75).
+ eseries_snapshot_views_writable: # Default for whether to make snapshot volumes writable.
+ eseries_snapshot_views_validate: # Default for whether to validate snapshot volumes after creation.
+ eseries_snapshot_views:
+ - volume: # Consistency group's snapshot view's name.
+ group_name: # Snapshot consistency group's name.
+        pit_name: # Point-in-time snapshot image group name. (Only available when the images were created with the na_santricity_snapshot module, either directly or via the role.)
+        pit_timestamp: # Point-in-time snapshot image group timestamp, in the form YYYY-MM-DD HH:MM:SS (AM|PM); hours, minutes, seconds, and day-period are optional.
+ host: # Host or host group to map snapshot volumes.
+ writable: # Whether snapshot volume of base volume images should be writable.
+ validate: # Whether snapshot volume should be validated which includes both a media scan and parity validation.
+ reserve_capacity_pct: # Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). Only used when snapshot volume is writable.
+ preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+ alert_threshold: # Reserve capacity percentage full to alert administrators
+ volumes: # (Optional) Select subset of volumes within the snapshot consistency group.
+ - name: # Name of volume within consistency group.
+ host: # Host or host group to map snapshot volumes.
+ lun: # Logical unit number (LUN) mapping for the host or host group.
+ writable: # Whether snapshot volume of base volume images should be writable.
+ validate: # Whether snapshot volume should be validated which includes both a media scan and parity validation.
+ reserve_capacity_pct: # Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). Only used when snapshot volume is writable.
+ preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
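+
+    # For illustration only, a hypothetical read-only view of a consistency group's point-in-time images
+    # mapped to a backup host; every name and the timestamp below are placeholders.
+    # eseries_snapshot_views:
+    #   - volume: example_snapshot_view
+    #     group_name: example_snapshot_group
+    #     pit_timestamp: "2024-01-15 06:00:00"
+    #     host: backup_server
+    #     writable: false
+    #     validate: true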
+
+ # Snapshot Consistency Group Rollback Default Policy Specifications
+ eseries_snapshot_rollback_priority: medium # Default point-in-time rollback priority (Default: medium). Choices [lowest, low, medium, high, highest]
+ eseries_snapshot_rollback_backup: true # Default whether snapshot should be taken prior to rolling back base volumes (Default: true).
+ eseries_snapshot_rollbacks:
+ - group_name: # Snapshot consistency group's name.
+        pit_name: # Point-in-time snapshot image group name. (Only available when the images were created with the na_santricity_snapshot module, either directly or via the role.)
+        pit_timestamp: # Point-in-time snapshot image group timestamp, in the form YYYY-MM-DD HH:MM:SS (AM|PM); hours, minutes, seconds, and day-period are optional.
+ rollback_priority: # Storage system priority for base volume rollback (Default: medium). Choices [lowest, low, medium, high, highest]
+ rollback_backup: # Whether to create point-in-time snapshot images of the consistency group prior to rollback.
+ volumes:
+
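+    # For illustration only, a hypothetical rollback of a consistency group to a specific point-in-time
+    # image; a protective snapshot is taken first because rollback_backup defaults to true. The group
+    # name and timestamp are placeholders.
+    # eseries_snapshot_rollbacks:
+    #   - group_name: example_snapshot_group
+    #     pit_timestamp: "2024-01-15 06:00:00"
+    #     rollback_priority: high
+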
+ # Initiator-Target Protocol Variable Defaults
+    # Note that the following commands need to produce a unique, line-separated list of the IQNs or WWNs of the interfaces used. Overwrite as necessary.
+ eseries_initiator_protocol: fc # Storage system protocol. Choices: fc, iscsi, sas, ib_iser, ib_srp, nvme_ib, nvme_fc, nvme_roce
+ eseries_initiator_command:
+ fc:
+ linux: "cat /sys/class/fc_host/host*/port_name | sort | uniq"
+ windows: "(Get-InitiatorPort | Where-Object -P ConnectionType -EQ 'Fibre Channel' | Select-Object -Property PortAddress |
+ Format-Table -AutoSize -HideTableHeaders | Out-String).trim()"
+ iscsi:
+ linux: "grep -o iqn.* /etc/iscsi/initiatorname.iscsi"
+ windows: "(get-initiatorPort | select-object -property nodeaddress | sort-object | get-unique | ft -autoSize | out-string -stream |
+ select-string iqn | out-string).trim()"
+ sas:
+ # NetApp IMT for SAS attached E-Series SAN hosts recommends adding all possible SAS addresses with the base address
+ # starting at 0, and the last address ending in 3 for single port HBAs, or 7 for dual port HBAs. Since determining
+        # single vs. dual port HBAs adds complexity, we always add all 8 possible permutations of the SAS address.
+ linux: "cat /sys/class/sas_host/host*/device/scsi_host/*/host_sas_address | sort | uniq"
+ windows: "(Get-InitiatorPort | Where-Object -P ConnectionType -EQ 'SAS' | Select-Object -Property PortAddress | Format-Table -AutoSize -HideTableHeaders | Out-String).trim()"
+ ib_iser:
+ linux: "grep -o iqn.* /etc/iscsi/initiatorname.iscsi"
+ windows: "" # add windows command for determining host iqn address(es)
+ ib_srp:
+ linux: "for fp in /sys/class/infiniband/*/ports/*/gids/*; do out=`cat $fp | tr -d :`; port=`expr substr $out 17 32`; if [ $port != 0000000000000000 ]; then echo 0x$port; fi; done | sort | uniq"
+ windows: "" # add windows command for determining host guid
+ nvme_ib:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+ nvme_fc:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+ nvme_roce:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+
+    # Manual host definitions. Linux and Windows systems can be automatically populated based on host mappings found in eseries_storage_pool_configuration.
+    # Note that using the automated method is preferred.
+ eseries_host_force_port: true # Default for whether ports are to be allowed to be re-assigned (boolean)
+ eseries_host_remove_unused_hostgroup: true # Forces any unused groups to be removed
+ eseries_host_object:
+ - name: # Host label as referenced by the storage array.
+        state: # Specifies whether the host definition should exist. Choices: present, absent
+ ports: # List of port definitions
+ - type: # Port protocol definition (iscsi, fc, sas, ib, nvme). Note that you should use 'iscsi' prior to SANtricity version 11.60 for IB iSER.
+ label: # Arbitrary port label
+ port: # Port initiator (iqn, wwn, etc)
+ group: # Host's host group
+ host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or the storage system default host type is incorrect.
+ # Common host type definitions:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
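+
+    # For illustration only, a hypothetical manually defined Fibre Channel host; the host name, group and
+    # WWPN are placeholders and should be replaced with real initiator details.
+    # eseries_host_object:
+    #   - name: example_windows_host
+    #     state: present
+    #     host_type: Windows
+    #     group: example_windows_cluster
+    #     ports:
+    #       - type: fc
+    #         label: example_windows_host_port1
+    #         port: "10:00:00:00:c9:12:34:56"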
+
+Remove Inventory Configuration
+------------------------------
+Whether it's for testing automation or a temporary project, it's helpful to be able to undo everything you configured. Just set `eseries_remove_all_configuration: True` and nar_santricity_host will remove everything it configured. Be aware that this feature does not know the previous state of anything; it simply removes whatever is specified in the inventory.
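+
+As a minimal sketch, the flag can simply be set alongside the rest of the storage system's inventory variables (the group_vars file name below is hypothetical):
+
+```yaml
+# group_vars/eseries_storage_systems.yml (hypothetical location; any host_vars/group_vars file works)
+eseries_remove_all_configuration: True
+```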
+
+License
+-------
+ BSD-3-Clause
+
+Maintainer Information
+----------------------
+ - Nathan Swartz (@ndswartz)
+ - Joe McCormick (@iamjoemccormick)
+ - Tracy Cummins (@tracycummins)
+
+=============
+Release Notes
+=============
+
+v1.2.13
+=======
+
+Bugfixes
+--------
+
+- Fix availability of client certificate change.
+
+v1.2.12
+=======
+
+Bugfixes
+--------
+
+- Fix host and host port names from being changed to lower case.
+
+v1.2.11
+=======
+
+Bugfixes
+--------
+
+- Fix login banner message option bytes error in na_santricity_global.
+
+v1.2.10
+=======
+
+Minor Changes
+-------------
+
+- Add login banner message to na_santricity_global module and nar_santricity_management role.
+- Add usable drive option for na_santricity_storagepool module and nar_santricity_host role which can be used to choose selected drives for storage pool/volumes or define a pattern drive selection.
+
+Bugfixes
+--------
+
+- Fix PEM certificate/key imports in the na_santricity_server_certificate module.
+- Fix na_santricity_mgmt_interface IPv4 and IPv6 form validation.
+
+v1.2.9
+======
+
+Minor Changes
+-------------
+
+- Add eseries_system_old_password variable to facilitate changing the storage system's admin password.
+- Add remove_unspecified_user_certificates variable to the client certificates module.
+
+Bugfixes
+--------
+
+- Fix missing proxy client and server certificate in management role.
+- Fix missing proxy validate_certs and change current proxy password variables.
+- Fix server certificate module not forwarding certificate imports to the embedded web services.
+
+v1.2.8
+======
+
+Bugfixes
+--------
+
+- Fix pkcs8 private key passphrase issue.
+- Fix storage system admin password change from web services proxy in na_santricity_auth module.
+
+v1.2.7
+======
+
+v1.2.6
+======
+
+Bugfixes
+--------
+
+- Fix jinja issue with collecting certificates paths in nar_santricity_management role.
+
+v1.2.5
+======
+
+Bugfixes
+--------
+
+- Add missing http(s) proxy username and password parameters from na_santricity_asup module and nar_santricity_management role.
+- Add missing storage pool configuration parameter, criteria_drive_interface_type, to nar_santricity_host role.
+
+v1.2.4
+======
+
+v1.2.3
+======
+
+Minor Changes
+-------------
+
+- Added nvme4k as a drive type interface to the na_santricity_storagepool module.
+- Added options for critical and warning threshold setting in na_santricity_storagepool module and nar_santricity_host role.
+- Fix dynamic disk pool critical and warning threshold settings.
+
+Bugfixes
+--------
+
+- Fix drive firmware upgrade issue that prevented updating firmware when drive was in use.
+
+v1.2.2
+======
+
+v1.2.1
+======
+
+Release Summary
+---------------
+
+Release 1.2.2 simply removes the resource-provisioned volumes feature from the collection.
+
+
+Minor Changes
+-------------
+
+- Add IPv6 and FQDN support for NTP
+- Add IPv6 support for DNS
+- Add criteria_drive_max_size option to na_santricity_storagepool and nar_santricity_host role.
+- Add resource-provisioned volumes option to globals and nar_santricity_management role.
+- Remove resource-provisioned volumes setting from na_santricity_global module and nar_santricity_management role.
+
+v1.2.0
+======
+
+Release Summary
+---------------
+
+1.2.0 release of ``netapp_eseries.santricity`` collection on 2021-03-01.
+
+Minor Changes
+-------------
+
+- na_santricity_discover - Add support for discovering storage systems directly using devmgr/v2/storage-systems/1/about endpoint since its old method of discovery is being deprecated.
+- na_santricity_facts - Add storage system information to facilitate ``netapp_eseries.host`` collection various protocol configuration.
+- na_santricity_server_certificate - New module to configure storage system's web server certificate configuration.
+- na_santricity_snapshot - New module to configure NetApp E-Series Snapshot consistency groups for any number of base volumes.
+- na_santricity_volume - Add percentage size unit (pct), which allows volumes to be created based on the total storage pool size.
+- nar_santricity_host - Add eseries_storage_pool_configuration list options, criteria_volume_count, criteria_reserve_free_capacity_pct, and common_volume_host to facilitate volumes based on percentages of storage pool or volume group.
+- nar_santricity_host - Add support for snapshot group creation.
+- nar_santricity_host - Improve host mapping information discovery.
+- nar_santricity_host - Improve storage system discovery related error messages.
+- nar_santricity_management - Add support for server certificate management.
+
+Bugfixes
+--------
+
+- nar_santricity_host - Fix README.md examples.
+
+v1.1.0
+======
+
+Release Summary
+---------------
+
+This release focused on providing volume details through netapp_volumes_by_initiators in the na_santricity_facts module, improving the nar_santricity_common role's storage system API information, and resolving issues.
+
+Minor Changes
+-------------
+
+- Add functionality to remove all inventory configuration in the nar_santricity_host role. Set configuration.eseries_remove_all_configuration=True to remove all storage pool/volume configuration, host, hostgroup, and lun mapping configuration.
+- Add host_types, host_port_protocols, host_port_information, hostside_io_interface_protocols to netapp_volumes_by_initiators in the na_santricity_facts module.
+- Add storage pool information to the volume_by_initiator facts.
+- Add storage system not found exception to the common role's build_info task.
+- Add volume_metadata option to na_santricity_volume module, add volume_metadata information to the netapp_volumes_by_initiators dictionary in na_santricity_facts module, and update the nar_santricity_host role with the option.
+- Improve nar_santricity_common storage system api determinations; attempts to discover the storage system using the information provided in the inventory before attempting to search the subnet.
+- Increased the storage system discovery connection timeouts to 30 seconds to prevent systems from not being discovered over slow connections.
+- Minimize the facts gathered for the host initiators.
+- Update ib iser determination to account for changes in firmware 11.60.2.
+- Use existing Web Services Proxy storage system identifier when one is already created and one is not provided in the inventory.
+- Utilize eseries_iscsi_iqn before searching host for iqn in nar_santricity_host role.
+
+Bugfixes
+--------
+
+- Fix check_port_type method for ib iser when ib is the port type.
+- Fix examples in the netapp_e_mgmt_interface module.
+- Fix issue with changing host port name.
+- Fix na_santricity_lun_mapping unmapping issue; previously mapped volumes failed to be unmapped.
diff --git a/ansible_collections/netapp_eseries/santricity/ansible.cfg b/ansible_collections/netapp_eseries/santricity/ansible.cfg
new file mode 100644
index 000000000..56346cd27
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/ansible.cfg
@@ -0,0 +1,7 @@
+[galaxy]
+server_list = release_galaxy
+
+[galaxy_server.release_galaxy]
+url=https://galaxy.ansible.com/
+token=260684515156e5658f2ca685ac392c6e40771bad
+
diff --git a/ansible_collections/netapp_eseries/santricity/changelogs/.plugin-cache.yaml b/ansible_collections/netapp_eseries/santricity/changelogs/.plugin-cache.yaml
new file mode 100644
index 000000000..b581da558
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/changelogs/.plugin-cache.yaml
@@ -0,0 +1,297 @@
+plugins:
+ become: {}
+ cache: {}
+ callback: {}
+ cliconf: {}
+ connection: {}
+ httpapi: {}
+ inventory: {}
+ lookup:
+ santricity_host:
+ description: Collects host information
+ name: santricity_host
+ version_added: null
+ santricity_host_detail:
+ description: Expands the host information from santricity_host lookup
+ name: santricity_host_detail
+ version_added: null
+ santricity_storage_pool:
+ description: Storage pool information
+ name: santricity_storage_pool
+ version_added: null
+ module:
+ na_santricity_alerts:
+ description: NetApp E-Series manage email notification settings
+ name: na_santricity_alerts
+ namespace: ''
+ version_added: null
+ na_santricity_alerts_syslog:
+ description: NetApp E-Series manage syslog servers receiving storage system
+ alerts.
+ name: na_santricity_alerts_syslog
+ namespace: ''
+ version_added: null
+ na_santricity_asup:
+ description: NetApp E-Series manage auto-support settings
+ name: na_santricity_asup
+ namespace: ''
+ version_added: null
+ na_santricity_auditlog:
+ description: NetApp E-Series manage audit-log configuration
+ name: na_santricity_auditlog
+ namespace: ''
+ version_added: null
+ na_santricity_auth:
+ description: NetApp E-Series set or update the password for a storage array
+ device or SANtricity Web Services Proxy.
+ name: na_santricity_auth
+ namespace: ''
+ version_added: null
+ na_santricity_client_certificate:
+ description: NetApp E-Series manage remote server certificates.
+ name: na_santricity_client_certificate
+ namespace: ''
+ version_added: null
+ na_santricity_discover:
+ description: NetApp E-Series discover E-Series storage systems
+ name: na_santricity_discover
+ namespace: ''
+ version_added: null
+ na_santricity_drive_firmware:
+ description: NetApp E-Series manage drive firmware
+ name: na_santricity_drive_firmware
+ namespace: ''
+ version_added: null
+ na_santricity_facts:
+ description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+ name: na_santricity_facts
+ namespace: ''
+ version_added: null
+ na_santricity_firmware:
+ description: NetApp E-Series manage firmware.
+ name: na_santricity_firmware
+ namespace: ''
+ version_added: null
+ na_santricity_global:
+ description: NetApp E-Series manage global settings configuration
+ name: na_santricity_global
+ namespace: ''
+ version_added: null
+ na_santricity_host:
+ description: NetApp E-Series manage eseries hosts
+ name: na_santricity_host
+ namespace: ''
+ version_added: null
+ na_santricity_hostgroup:
+ description: NetApp E-Series manage array host groups
+ name: na_santricity_hostgroup
+ namespace: ''
+ version_added: null
+ na_santricity_ib_iser_interface:
+ description: NetApp E-Series manage InfiniBand iSER interface configuration
+ name: na_santricity_ib_iser_interface
+ namespace: ''
+ version_added: null
+ na_santricity_iscsi_interface:
+ description: NetApp E-Series manage iSCSI interface configuration
+ name: na_santricity_iscsi_interface
+ namespace: ''
+ version_added: null
+ na_santricity_iscsi_target:
+ description: NetApp E-Series manage iSCSI target configuration
+ name: na_santricity_iscsi_target
+ namespace: ''
+ version_added: null
+ na_santricity_ldap:
+ description: NetApp E-Series manage LDAP integration to use for authentication
+ name: na_santricity_ldap
+ namespace: ''
+ version_added: null
+ na_santricity_lun_mapping:
+ description: NetApp E-Series manage lun mappings
+ name: na_santricity_lun_mapping
+ namespace: ''
+ version_added: null
+ na_santricity_mgmt_interface:
+ description: NetApp E-Series manage management interface configuration
+ name: na_santricity_mgmt_interface
+ namespace: ''
+ version_added: null
+ na_santricity_nvme_interface:
+ description: NetApp E-Series manage NVMe interface configuration
+ name: na_santricity_nvme_interface
+ namespace: ''
+ version_added: null
+ na_santricity_proxy_drive_firmware_upload:
+ description: NetApp E-Series manage proxy drive firmware files
+ name: na_santricity_proxy_drive_firmware_upload
+ namespace: ''
+ version_added: null
+ na_santricity_proxy_firmware_upload:
+ description: NetApp E-Series manage proxy firmware uploads.
+ name: na_santricity_proxy_firmware_upload
+ namespace: ''
+ version_added: null
+ na_santricity_proxy_systems:
+ description: NetApp E-Series manage SANtricity web services proxy storage arrays
+ name: na_santricity_proxy_systems
+ namespace: ''
+ version_added: null
+ na_santricity_storagepool:
+ description: NetApp E-Series manage volume groups and disk pools
+ name: na_santricity_storagepool
+ namespace: ''
+ version_added: null
+ na_santricity_syslog:
+ description: NetApp E-Series manage syslog settings
+ name: na_santricity_syslog
+ namespace: ''
+ version_added: null
+ na_santricity_volume:
+ description: NetApp E-Series manage storage volumes (standard and thin)
+ name: na_santricity_volume
+ namespace: ''
+ version_added: null
+ netapp_e_alerts:
+ description: NetApp E-Series manage email notification settings
+ name: netapp_e_alerts
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_amg:
+ description: NetApp E-Series create, remove, and update asynchronous mirror
+ groups
+ name: netapp_e_amg
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_amg_role:
+ description: NetApp E-Series update the role of a storage array within an Asynchronous
+ Mirror Group (AMG).
+ name: netapp_e_amg_role
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_amg_sync:
+ description: NetApp E-Series conduct synchronization actions on asynchronous
+ mirror groups.
+ name: netapp_e_amg_sync
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_asup:
+ description: NetApp E-Series manage auto-support settings
+ name: netapp_e_asup
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_auditlog:
+ description: NetApp E-Series manage audit-log configuration
+ name: netapp_e_auditlog
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_auth:
+ description: NetApp E-Series set or update the password for a storage array.
+ name: netapp_e_auth
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_drive_firmware:
+ description: NetApp E-Series manage drive firmware
+ name: netapp_e_drive_firmware
+ namespace: ''
+ version_added: '2.9'
+ netapp_e_facts:
+ description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+ name: netapp_e_facts
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_firmware:
+ description: NetApp E-Series manage firmware.
+ name: netapp_e_firmware
+ namespace: ''
+ version_added: '2.9'
+ netapp_e_flashcache:
+ description: NetApp E-Series manage SSD caches
+ name: netapp_e_flashcache
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_global:
+ description: NetApp E-Series manage global settings configuration
+ name: netapp_e_global
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_host:
+ description: NetApp E-Series manage eseries hosts
+ name: netapp_e_host
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_hostgroup:
+ description: NetApp E-Series manage array host groups
+ name: netapp_e_hostgroup
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_iscsi_interface:
+ description: NetApp E-Series manage iSCSI interface configuration
+ name: netapp_e_iscsi_interface
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_iscsi_target:
+ description: NetApp E-Series manage iSCSI target configuration
+ name: netapp_e_iscsi_target
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_ldap:
+ description: NetApp E-Series manage LDAP integration to use for authentication
+ name: netapp_e_ldap
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_lun_mapping:
+ description: NetApp E-Series create, delete, or modify lun mappings
+ name: netapp_e_lun_mapping
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_mgmt_interface:
+ description: NetApp E-Series management interface configuration
+ name: netapp_e_mgmt_interface
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_snapshot_group:
+ description: NetApp E-Series manage snapshot groups
+ name: netapp_e_snapshot_group
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_snapshot_images:
+ description: NetApp E-Series create and delete snapshot images
+ name: netapp_e_snapshot_images
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_snapshot_volume:
+ description: NetApp E-Series manage snapshot volumes.
+ name: netapp_e_snapshot_volume
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_storage_system:
+ description: NetApp E-Series Web Services Proxy manage storage arrays
+ name: netapp_e_storage_system
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_storagepool:
+ description: NetApp E-Series manage volume groups and disk pools
+ name: netapp_e_storagepool
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_syslog:
+ description: NetApp E-Series manage syslog settings
+ name: netapp_e_syslog
+ namespace: ''
+ version_added: '2.7'
+ netapp_e_volume:
+ description: NetApp E-Series manage storage volumes (standard and thin)
+ name: netapp_e_volume
+ namespace: ''
+ version_added: '2.2'
+ netapp_e_volume_copy:
+ description: NetApp E-Series create volume copy pairs
+ name: netapp_e_volume_copy
+ namespace: ''
+ version_added: '2.2'
+ netconf: {}
+ shell: {}
+ strategy: {}
+ vars: {}
+version: 1.4.0
diff --git a/ansible_collections/netapp_eseries/santricity/changelogs/changelog.yaml b/ansible_collections/netapp_eseries/santricity/changelogs/changelog.yaml
new file mode 100644
index 000000000..e3bbc107f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/changelogs/changelog.yaml
@@ -0,0 +1,271 @@
+ancestor: null
+releases:
+ 1.1.0:
+ changes:
+ bugfixes:
+ - Fix check_port_type method for ib iser when ib is the port type.
+ - Fix examples in the netapp_e_mgmt_interface module.
+ - Fix issue with changing host port name.
+ - Fix na_santricity_lun_mapping unmapping issue; previously mapped volumes failed
+ to be unmapped.
+ minor_changes:
+ - Add functionality to remove all inventory configuration in the nar_santricity_host
+ role. Set configuration.eseries_remove_all_configuration=True to remove all
+ storage pool/volume configuration, host, hostgroup, and lun mapping configuration.
+ - Add host_types, host_port_protocols, host_port_information, hostside_io_interface_protocols
+ to netapp_volumes_by_initiators in the na_santricity_facts module.
+ - Add storage pool information to the volume_by_initiator facts.
+ - Add storage system not found exception to the common role's build_info task.
+ - Add volume_metadata option to na_santricity_volume module, add volume_metadata
+ information to the netapp_volumes_by_initiators dictionary in na_santricity_facts
+ module, and update the nar_santricity_host role with the option.
+ - Improve nar_santricity_common storage system api determinations; attempts
+ to discover the storage system using the information provided in the inventory
+ before attempting to search the subnet.
+ - Increased the storage system discovery connection timeouts to 30 seconds to
+ prevent systems from not being discovered over slow connections.
+ - Minimize the facts gathered for the host initiators.
+ - Update ib iser determination to account for changes in firmware 11.60.2.
+ - Use existing Web Services Proxy storage system identifier when one is already
+ created and one is not provided in the inventory.
+ - Utilize eseries_iscsi_iqn before searching host for iqn in nar_santricity_host
+ role.
+      release_summary: This release focused on providing volume details through
+        netapp_volumes_by_initiators in the na_santricity_facts module, improving
+        the nar_santricity_common role's storage system API information, and resolving
+        issues.
+ fragments:
+ - 1.0.9.yml
+ - add_io_communication_protocol_info_to_volume_by_initator_facts.yml
+ - add_storage_pool_info_to_volume_by_initiator_facts.yml
+ - add_storage_system_not_found_exception.yml
+ - add_undo_configuration.yml
+ - add_volume_metadata_option.yml
+ - fix_change_host_port.yml
+ - fix_ib_iser_port_type.yml
+ - fix_netapp_e_mgmt_interface_examples.yml
+ - fix_volume_unmapping_issue.yml
+ - improve_storage_system_api_determinations.yml
+ - increase_discovery_connection_timeout.yml
+ - minimize_host_initiator_facts_gathered.yml
+ - update_ib_iser_determination.yml
+ - use_existing_proxy_ssid_when_unspecified.yml
+ - utilize_eseries_iscsi_iqn_before_searching_host.yml
+ release_date: '2020-09-18'
+ 1.2.0:
+ changes:
+ bugfixes:
+ - nar_santricity_host - Fix README.md examples.
+ minor_changes:
+ - na_santricity_discover - Add support for discovering storage systems directly
+        using devmgr/v2/storage-systems/1/about endpoint since its old method of discovery
+ is being deprecated.
+ - na_santricity_facts - Add storage system information to facilitate ``netapp_eseries.host``
+ collection various protocol configuration.
+ - na_santricity_server_certificate - New module to configure storage system's
+ web server certificate configuration.
+ - na_santricity_snapshot - New module to configure NetApp E-Series Snapshot
+        consistency groups for any number of base volumes.
+      - na_santricity_volume - Add percentage size unit (pct), which allows volumes
+        to be created based on the total storage pool size.
+ - nar_santricity_host - Add eseries_storage_pool_configuration list options,
+ criteria_volume_count, criteria_reserve_free_capacity_pct, and common_volume_host
+ to facilitate volumes based on percentages of storage pool or volume group.
+ - nar_santricity_host - Add support for snapshot group creation.
+ - nar_santricity_host - Improve host mapping information discovery.
+ - nar_santricity_host - Improve storage system discovery related error messages.
+ - nar_santricity_management - Add support for server certificate management.
+ release_summary: 1.2.0 release of ``netapp_eseries.santricity`` collection on
+ 2021-03-01.
+ fragments:
+ - 1.2.0.yml
+ - error-messages.yml
+ - host-mapping-information.yml
+ - hostside-facts.yml
+ - readme-examples.yml
+ - server-certificate.yml
+ - snapshots.yml
+ - storage-system-discovery.yml
+ - volume-by-percentage.yml
+ release_date: '2021-03-30'
+ 1.2.1:
+ changes:
+ minor_changes:
+ - Add IPv6 and FQDN support for NTP
+ - Add IPv6 support for DNS
+ - Add criteria_drive_max_size option to na_santricity_storagepool and nar_santricity_host
+ role.
+ - Add resource-provisioned volumes option to globals and nar_santricity_management
+ role.
+      - Remove resource-provisioned volumes setting from na_santricity_global module
+        and nar_santricity_management role.
+ release_summary: Release 1.2.2 simply removes resource-provisioned volumes feature
+ from collection.
+ fragments:
+ - 1.2.2.yml
+ - criteria_drive_max_size.yml
+ - fix_dns_ntp.yml
+ - remove_resource_provisioned_volumes.yml
+ - resource_provisioned_volume.yml
+ release_date: '2021-04-12'
+ 1.2.10:
+ changes:
+ bugfixes:
+ - Fix PEM certificate/key imports in the na_santricity_server_certificate module.
+ - Fix na_santricity_mgmt_interface IPv4 and IPv6 form validation.
+ minor_changes:
+ - Add login banner message to na_santricity_global module and nar_santricity_management
+ role.
+ - Add usable drive option for na_santricity_storagepool module and nar_santricity_host
+ role which can be used to choose selected drives for storage pool/volumes
+ or define a pattern drive selection.
+ fragments:
+ - add_login_banner_message.yml
+ - add_usable_drive_storage_pool_option.yml
+ - fix_mgmt_ip_address_form_validation.yml
+ - fix_server_pem_certificate_imports.yml
+ release_date: '2021-05-26'
+ 1.2.11:
+ changes:
+ bugfixes:
+ - Fix login banner message option bytes error in na_santricity_global.
+ fragments:
+ - fix_login_banner.yml
+ release_date: '2021-06-01'
+ 1.2.12:
+ changes:
+ bugfixes:
+ - Fix host and host port names from being changed to lower case.
+ fragments:
+ - fix_host_object_naming_case.yml
+ release_date: '2021-06-07'
+ 1.2.13:
+ changes:
+ bugfixes:
+ - Fix availability of client certificate change.
+ fragments:
+ - fix_client_certificate_availability.yml
+ release_date: '2021-06-11'
+ 1.2.2:
+ release_date: '2021-04-13'
+ 1.2.3:
+ changes:
+ bugfixes:
+      - Fix drive firmware upgrade issue that prevented updating firmware when drive
+ was in use.
+ minor_changes:
+ - Added nvme4k as a drive type interface to the na_santricity_storagepool module.
+ - Added options for critical and warning threshold setting in na_santricity_storagepool
+ module and nar_santricity_host role.
+ - Fix dynamic disk pool critical and warning threshold settings.
+ fragments:
+ - add_nvme_drive_interface.yml
+ - fix_ddp_threshold_setting.yml
+ - fix_drive_firmware.yml
+ release_date: '2021-04-14'
+ 1.2.4:
+ release_date: '2021-04-14'
+ 1.2.5:
+ changes:
+ bugfixes:
+ - Add missing http(s) proxy username and password parameters from na_santricity_asup
+        module and nar_santricity_management role.
+ - Add missing storage pool configuration parameter, criteria_drive_interface_type,
+ to nar_santricity_host role.
+ fragments:
+ - criteria_drive_interface_type.yml
+ - fix_missing_asup_parameters.yml
+ release_date: '2021-04-19'
+ 1.2.6:
+ changes:
+ bugfixes:
+ - Fix jinja issue with collecting certificates paths in nar_santricity_management
+ role.
+ fragments:
+ - fix_security_certificates.yml
+ release_date: '2021-04-19'
+ 1.2.7:
+ fragments:
+ - proxy_asup_documentation.yml
+ release_date: '2021-04-19'
+ 1.2.8:
+ changes:
+ bugfixes:
+ - Fix pkcs8 private key passphrase issue.
+ - Fix storage system admin password change from web services proxy in na_santricity_auth
+ module.
+ fragments:
+ - fix_pkcs8_cert_issue.yml
+ - fix_proxy_admin_password_change.yml
+ release_date: '2021-05-11'
+ 1.2.9:
+ changes:
+ bugfixes:
+ - Fix missing proxy client and server certificate in management role.
+ - Fix missing proxy validate_certs and change current proxy password variables.
+ - Fix server certificate module not forwarding certificate imports to the embedded
+ web services.
+ minor_changes:
+      - Add eseries_system_old_password variable to facilitate changing the storage
+ system's admin password.
+ - Add remove_unspecified_user_certificates variable to the client certificates
+ module.
+ fragments:
+ - add_eseries_system_old_password_variable_to_change_admin.yml
+ - fix_certificates.yml
+ release_date: '2021-05-13'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - santricity_host - Ensure a list of volumes are provided to prevent netapp_eseries.santricity.santricity_host
+ (lookup) index is string not integer exception.
+ minor_changes:
+ - na_santricity_global - Add controller_shelf_id argument to set controller
+ shelf identifier.
+ - na_santricity_volume - Add flag to control whether volume expansion operations
+ are allowed.
+ - na_santricity_volume - Add volume write cache mirroring option.
+ - nar_santricity_host - Add volume write cache mirroring options.
+ fragments:
+ - add_controller_shelf_id_option.yml
+ - add_flag_to_allow_volume_expansion.yml
+ - add_volume_write_cache_mirroring_option.yml
+ - fix_single_volume_host_mapping_determinations.yml
+ release_date: '2022-04-05'
+ 1.3.1:
+ changes:
+ bugfixes:
+ - na_santricity_mgmt_interface - Fix default required_if state option for na_santricity_mgmt_interface
+ - netapp_eseries.santricity.nar_santricity_host - Fix default MTU value for
+ NVMe RoCE.
+ minor_changes:
+ - Require Ansible 2.10 or later.
+ - na_santricity_volume - Add size_tolerance option to handle the difference
+ in volume size with SANtricity System Manager.
+ - nar_santricity_common - utilize provided eseries management information to
+ determine network to search.
+ fragments:
+ - add_volume_size_tolerance.yml
+ - fix_nvme_roce_mtu_default.yml
+ - fix_required_if_state_option.yml
+ - improve_system_discovery.yml
+ - require_ansible_2.10_or_later.yml
+ release_date: '2022-08-15'
+ 1.4.0:
+ changes:
+ bugfixes:
+ - netapp_eseries.santricity.na_santricity_mgmt_interface - Add the ability to
+ configure DNS, NTP and SSH separately from management interfaces.
+ - netapp_eseries.santricity.nar_santricity_host - Fix default MTU value for
+ NVMe RoCE.
+ - netapp_eseries.santricity.nar_santricity_management - Add tasks to set DNS,
+ NTP and SSH globally separately from management interfaces.
+ minor_changes:
+ - netapp_eseries.santricity.na_santricity_iscsi_interface - Add support of iSCSI
+ HIC speed.
+ - netapp_eseries.santricity.nar_santricity_host - Add support of iSCSI HIC speed.
+ fragments:
+ - add_iscsi_hic_speed.yml
+ - fix_global_management_interface_configuration.yml
+ - fix_nvme_roce_mtu_default.yml
+ release_date: '2023-01-30'
diff --git a/ansible_collections/netapp_eseries/santricity/changelogs/config.yaml b/ansible_collections/netapp_eseries/santricity/changelogs/config.yaml
new file mode 100644
index 000000000..1d41d0850
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/changelogs/config.yaml
@@ -0,0 +1,32 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sanitize_changelog: true
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Netapp E-Series SANtricity Collection
+trivial_section_name: trivial
+use_fqcn: true
diff --git a/ansible_collections/netapp_eseries/santricity/eseries-ansible-collections-diagram.png b/ansible_collections/netapp_eseries/santricity/eseries-ansible-collections-diagram.png
new file mode 100644
index 000000000..0d8f1903c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/eseries-ansible-collections-diagram.png
Binary files differ
diff --git a/ansible_collections/netapp_eseries/santricity/meta/runtime.yml b/ansible_collections/netapp_eseries/santricity/meta/runtime.yml
new file mode 100644
index 000000000..2ddd9ca66
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.13' \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py b/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py
new file mode 100644
index 000000000..f094b0cc5
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+
+ # Documentation fragment for E-Series
+ ESERIES = r'''
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
+ Example https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ required: false
+ type: str
+ default: 1
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+
+notes:
+ - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
+ the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
+ - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
+ - M(netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
+'''
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py b/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py
new file mode 100644
index 000000000..0551f2821
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r"""
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series
+"""
+
+ # Documentation fragment for E-Series
+ SANTRICITY_PROXY_DOC = r"""
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
+ - Example https://prod-1.wahoo.acme.com:8443/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+
+notes:
+ - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
+ the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
+ - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
+ - M(netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
+"""
+
+ # Documentation fragment for E-Series
+ SANTRICITY_DOC = r"""
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
+ - Example https://prod-1.wahoo.acme.com:8443/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ required: false
+ type: str
+ default: 1
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+
+notes:
+ - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
+ the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
+ - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
+ - M(netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
+"""
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py
new file mode 100644
index 000000000..ca3b93b8b
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py
@@ -0,0 +1,85 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: santricity_hosts
+ author: Nathan Swartz
+ short_description: Collects host information
+ description:
+ - Collects current host, expected host and host group inventory definitions.
+ options:
+ inventory:
+ description:
+ - E-Series storage array inventory, hostvars[inventory_hostname].
+ - Run na_santricity_facts prior to calling
+ required: True
+ type: complex
+ volumes:
+ description:
+                - Volume information returned from the santricity_volume lookup plugin, which expands the volume definitions from eseries_storage_pool_configuration.
+"""
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, inventory, volumes, **kwargs):
+ if isinstance(inventory, list):
+ inventory = inventory[0]
+
+ if not isinstance(volumes, list):
+ volumes = [volumes]
+
+ if ("eseries_storage_pool_configuration" not in inventory or not isinstance(inventory["eseries_storage_pool_configuration"], list) or
+ len(inventory["eseries_storage_pool_configuration"]) == 0):
+ return list()
+
+ if "eseries_storage_pool_configuration" not in inventory.keys():
+ raise AnsibleError("eseries_storage_pool_configuration must be defined. See nar_santricity_host role documentation.")
+
+ info = {"current_hosts": {}, "expected_hosts": {}, "host_groups": {}}
+
+ groups = []
+ hosts = []
+ non_inventory_hosts = []
+ non_inventory_groups = []
+ for group in inventory["groups"].keys():
+ groups.append(group)
+ hosts.extend(inventory["groups"][group])
+
+ if "eseries_host_object" in inventory.keys():
+ non_inventory_hosts = [host["name"] for host in inventory["eseries_host_object"]]
+ non_inventory_groups = [host["group"] for host in inventory["eseries_host_object"] if "group" in host]
+
+ for volume in volumes:
+ if volume["state"] == "present" and "host" in volume.keys():
+
+ if volume["host"] in groups:
+ # Add all expected group hosts
+ for expected_host in inventory["groups"][volume["host"]]:
+ if "host_type" in volume:
+ info["expected_hosts"].update({expected_host: {"state": "present",
+ "host_type": volume["host_type"],
+ "group": volume["host"]}})
+ else:
+ info["expected_hosts"].update({expected_host: {"state": "present",
+ "group": volume["host"]}})
+
+ info["host_groups"].update({volume["host"]: inventory["groups"][volume["host"]]})
+
+ elif volume["host"] in hosts:
+ if "host_type" in volume:
+ info["expected_hosts"].update({volume["host"]: {"state": "present",
+ "host_type": volume["host_type"],
+ "group": None}})
+ else:
+ info["expected_hosts"].update({volume["host"]: {"state": "present",
+ "group": None}})
+ elif volume["host"] not in non_inventory_hosts and volume["host"] not in non_inventory_groups:
+ raise AnsibleError("Expected host or host group does not exist in your Ansible inventory and is not specified in"
+ " eseries_host_object variable! [%s]." % volume["host"])
+
+ return [info]
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py
new file mode 100644
index 000000000..23da7e7fd
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py
@@ -0,0 +1,106 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: santricity_hosts_detail
+ author: Nathan Swartz
+ short_description: Expands the host information from santricity_host lookup
+ description:
+ - Expands the host information from santricity_host lookup to include system and port information
+ options:
+ hosts:
+ description:
+                - Host information produced by the santricity_host lookup plugin; must contain the expected_hosts dictionary.
+                - Run the santricity_host lookup plugin prior to calling.
+ required: True
+ type: list
+ hosts_info:
+ description:
+ - The registered results from the setup module from each expected_hosts, hosts_info['results'].
+ - Collected results from the setup module for each expected_hosts from the results of the santricity_host lookup plugin.
+ required: True
+ type: list
+ host_interface_ports:
+ description:
+ - List of dictionaries containing "stdout_lines" which is a list of iqn/wwpns for each expected_hosts from the results of
+ the santricity_host lookup plugin.
+ - Register the results from the shell module that is looped over each host in expected_hosts. The command issued should result
+ in a newline delineated list of iqns, nqns, or wwpns.
+ required: True
+ type: list
+ protocol:
+ description:
+                - Storage system interface protocol (iscsi, sas, fc, ib_iser, ib_srp, nvme_ib, nvme_fc, or nvme_roce)
+ required: True
+ type: str
+
+"""
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, hosts, hosts_info, host_interface_ports, protocol, **kwargs):
+ if isinstance(hosts, list):
+ hosts = hosts[0]
+
+ if "expected_hosts" not in hosts:
+ raise AnsibleError("Invalid argument: hosts must contain the output from santricity_host lookup plugin.")
+ if not isinstance(hosts_info, list):
+ raise AnsibleError("Invalid argument: hosts_info must contain the results from the setup module for each"
+ " expected_hosts found in the output of the santricity_host lookup plugin.")
+ if not isinstance(host_interface_ports, list):
+ raise AnsibleError("Invalid argument: host_interface_ports must contain list of dictionaries containing 'stdout_lines' key"
+ " which is a list of iqns, nqns, or wwpns for each expected_hosts from the results of the santricity_host lookup plugin")
+ if protocol not in ["iscsi", "sas", "fc", "ib_iser", "ib_srp", "nvme_ib", "nvme_fc", "nvme_roce"]:
+            raise AnsibleError("Invalid argument: protocol must be one of the following: iscsi, sas, fc, ib_iser, ib_srp, nvme_ib, nvme_fc, nvme_roce.")
+
+ for host in hosts["expected_hosts"].keys():
+ sanitized_hostname = re.sub("[.:-]", "_", host)[:20]
+
+ # Add host information to expected host
+ for info in hosts_info:
+ if info["item"] == host:
+
+ # Determine host type
+ if "host_type" not in hosts["expected_hosts"][host].keys():
+ if info["ansible_facts"]["ansible_os_family"].lower() == "windows":
+ hosts["expected_hosts"][host]["host_type"] = "windows"
+ elif info["ansible_facts"]["ansible_os_family"].lower() in ["redhat", "debian", "suse"]:
+ hosts["expected_hosts"][host]["host_type"] = "linux dm-mp"
+
+ # Update hosts object
+ hosts["expected_hosts"][host].update({"sanitized_hostname": sanitized_hostname, "ports": []})
+
+ # Add SAS ports
+ for interface in host_interface_ports:
+ if interface["item"] == host and "stdout_lines" in interface.keys():
+ if protocol == "sas":
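+                            # Expand each reported base SAS address into all 8 possible port addresses
+                            # (final digit 0-7), covering both single- and dual-port HBA permutations.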
+ for index, address in enumerate([base[:-1] + str(index) for base in interface["stdout_lines"] for index in range(8)]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "sas", "label": label, "port": address})
+ elif protocol == "ib_iser" or protocol == "ib_srp":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "ib", "label": label, "port": address})
+ elif protocol == "nvme_ib":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "nvmeof", "label": label, "port": address})
+ elif protocol == "nvme_fc":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "nvmeof", "label": label, "port": address})
+ elif protocol == "nvme_roce":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "nvmeof", "label": label, "port": address})
+ else:
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": protocol, "label": label, "port": address})
+
+ return [hosts]
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py
new file mode 100644
index 000000000..6b5e30484
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py
@@ -0,0 +1,143 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+ def run(self, array_facts, volumes, **kwargs):
+ if isinstance(array_facts, list):
+ array_facts = array_facts[0]
+
+ if isinstance(volumes, dict): # This means that there is only one volume and volumes was stripped of its list
+ volumes = [volumes]
+
+ if "storage_array_facts" not in array_facts.keys():
+ # Don't throw exceptions unless you want run to terminate!!!
+ # raise AnsibleError("Storage array information not available. Collect facts using na_santricity_facts module.")
+ return list()
+
+ # Remove any absent volumes
+ volumes = [vol for vol in volumes if "state" not in vol or vol["state"] == "present"]
+
+ self.array_facts = array_facts["storage_array_facts"]
+ self.luns_by_target = self.array_facts["netapp_luns_by_target"]
+ self.access_volume_lun = self.array_facts["netapp_default_hostgroup_access_volume_lun"]
+
+ # Search for volumes that have a specified host or host group initiator
+ mapping_info = list()
+ for volume in volumes:
+ if "host" in volume.keys():
+
+ # host initiator is already mapped on the storage system
+ if volume["host"] in self.luns_by_target:
+
+ used_luns = [lun for name, lun in self.luns_by_target[volume["host"]]]
+ for host_group in self.array_facts["netapp_host_groups"]:
+ if volume["host"] == host_group["name"]: # target is an existing host group
+ for host in host_group["hosts"]:
+ used_luns.extend([lun for name, lun in self.luns_by_target[host]])
+ break
+ elif volume["host"] in host_group["hosts"]: # target is an existing host in the host group.
+ used_luns.extend([lun for name, lun in self.luns_by_target[host_group["name"]]])
+ break
+
+ for name, lun in self.luns_by_target[volume["host"]]:
+
+ # Check whether volume is mapped to the expected host
+ if name == volume["name"]:
+ # Check whether lun option differs from existing lun
+ if "lun" in volume and volume["lun"] != lun:
+ self.change_volume_mapping_lun(volume["name"], volume["host"], volume["lun"])
+ lun = volume["lun"]
+
+ if lun in used_luns:
+ raise AnsibleError("Volume [%s] cannot be mapped to host or host group [%s] using lun number %s!"
+ % (name, volume["host"], lun))
+
+ mapping_info.append({"volume": volume["name"], "target": volume["host"], "lun": lun})
+ break
+
+ # Volume has not been mapped to host initiator
+ else:
+
+ # Check whether lun option has been used
+ if "lun" in volume:
+ if volume["lun"] in used_luns:
+ for target in self.array_facts["netapp_luns_by_target"].keys():
+ for mapped_volume, mapped_lun in [entry for entry in self.array_facts["netapp_luns_by_target"][target] if entry]:
+ if volume["lun"] == mapped_lun:
+ if volume["name"] != mapped_volume:
+ raise AnsibleError("Volume [%s] cannot be mapped to host or host group [%s] using lun number %s!"
+ % (volume["name"], volume["host"], volume["lun"]))
+ else: # volume is being remapped with the same lun number
+ self.remove_volume_mapping(mapped_volume, target)
+ lun = volume["lun"]
+ else:
+ lun = self.next_available_lun(used_luns)
+
+ mapping_info.append({"volume": volume["name"], "target": volume["host"], "lun": lun})
+ self.add_volume_mapping(volume["name"], volume["host"], lun)
+
+ else:
+ raise AnsibleError("The host or host group [%s] is not defined!" % volume["host"])
+ else:
+ mapping_info.append({"volume": volume["name"]})
+
+ return mapping_info
+
+ def next_available_lun(self, used_luns):
+ """Find next available lun numbers."""
+ if self.access_volume_lun is not None:
+ used_luns.append(self.access_volume_lun)
+
+ lun = 1
+ while lun in used_luns:
+ lun += 1
+
+ return lun
+
+ def add_volume_mapping(self, name, host, lun):
+ """Add volume mapping to record table (luns_by_target)."""
+ # Find the associated group and the group's hosts
+ for host_group in self.array_facts["netapp_host_groups"]:
+
+ if host == host_group["name"]:
+ # add to group
+ self.luns_by_target[host].append([name, lun])
+
+ # add to hosts
+ for hostgroup_host in host_group["hosts"]:
+ self.luns_by_target[hostgroup_host].append([name, lun])
+
+ break
+ else:
+ self.luns_by_target[host].append([name, lun])
+
+ def remove_volume_mapping(self, name, host):
+ """Remove volume mapping from the record table (luns_by_target)."""
+ # Find the associated group and the group's hosts
+ for host_group in self.array_facts["netapp_host_groups"]:
+ if host == host_group["name"]:
+ # Remove from the group
+ self.luns_by_target[host_group["name"]] = [entry for entry in self.luns_by_target[host_group["name"]] if entry[0] != name]
+ # Remove from each of the group's hosts
+ for hostgroup_host in host_group["hosts"]:
+ self.luns_by_target[hostgroup_host] = [entry for entry in self.luns_by_target[hostgroup_host] if entry[0] != name]
+ break
+ else:
+ self.luns_by_target[host] = [entry for entry in self.luns_by_target[host] if entry[0] != name]
+
+ def change_volume_mapping_lun(self, name, host, lun):
+ """remove volume mapping to record table (luns_by_target)."""
+ self.remove_volume_mapping(name, host)
+ self.add_volume_mapping(name, host, lun)
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py
new file mode 100644
index 000000000..3fd2df2b7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py
@@ -0,0 +1,80 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: santricity_sp_config
+ author: Nathan Swartz
+ short_description: Storage pool information
+ description:
+ - Retrieves storage pool information from the inventory
+"""
+import re
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+from itertools import product
+
+
+class LookupModule(LookupBase):
+ def run(self, inventory, state, **kwargs):
+ if isinstance(inventory, list):
+ inventory = inventory[0]
+
+ if ("eseries_storage_pool_configuration" not in inventory or not isinstance(inventory["eseries_storage_pool_configuration"], list) or
+ len(inventory["eseries_storage_pool_configuration"]) == 0):
+ return list()
+
+ sp_list = list()
+ for sp_info in inventory["eseries_storage_pool_configuration"]:
+
+ if not isinstance(sp_info, dict) or "name" not in sp_info:
+ raise AnsibleError("eseries_storage_pool_configuration must contain a list of dictionaries containing the necessary information.")
+
+ for sp in patternize(sp_info["name"], inventory):
+ if (("eseries_remove_all_configuration_state" in inventory and inventory["eseries_remove_all_configuration_state"] == "absent") or
+ ("state" in sp_info and sp_info["state"] == "absent") or
+ ("state" not in sp_info and "eseries_storage_pool_state" in inventory and inventory["eseries_storage_pool_state"] == "absent")):
+ sp_options = {"state": "absent"}
+ else:
+ sp_options = {"state": "present"}
+
+ for option in sp_info.keys():
+ sp_options.update({option: sp_info[option]})
+
+ sp_options.update({"name": sp})
+
+ if sp_options["state"] == state:
+ sp_list.append(sp_options)
+
+ return sp_list
+
+
+def patternize(pattern, inventory, storage_pool=None):
+ """Generate list of strings determined by a pattern"""
+ if storage_pool:
+ pattern = pattern.replace("[pool]", storage_pool)
+
+ if inventory:
+ inventory_tokens = re.findall(r"\[[a-zA-Z0-9_]*\]", pattern)
+ for token in inventory_tokens:
+ pattern = pattern.replace(token, str(inventory[token[1:-1]]))
+
+ tokens = re.findall(r"\[[0-9]-[0-9]\]|\[[a-z]-[a-z]\]|\[[A-Z]-[A-Z]\]", pattern)
+ segments = "%s".join(re.split(r"\[[0-9]-[0-9]\]|\[[a-z]-[a-z]\]|\[[A-Z]-[A-Z]\]", pattern))
+
+ if len(tokens) == 0:
+ return [pattern]
+
+ combinations = []
+ for token in tokens:
+ start, stop = token[1:-1].split("-")
+
+ try:
+ start = int(start)
+ stop = int(stop)
+ combinations.append([str(number) for number in range(start, stop + 1)])
+ except ValueError:
+ combinations.append([chr(number) for number in range(ord(start), ord(stop) + 1)])
+
+ return [segments % subset for subset in list(product(*combinations))]
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py
new file mode 100644
index 000000000..10400b688
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py
@@ -0,0 +1,128 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+from itertools import product
+
+
+class LookupModule(LookupBase):
+
+ def run(self, inventory, **kwargs):
+ if isinstance(inventory, list):
+ inventory = inventory[0]
+
+ if ("eseries_storage_pool_configuration" not in inventory.keys() or not isinstance(inventory["eseries_storage_pool_configuration"], list) or
+ len(inventory["eseries_storage_pool_configuration"]) == 0):
+ return list()
+
+ vol_list = list()
+ for sp_info in inventory["eseries_storage_pool_configuration"]:
+ if "name" not in sp_info.keys():
+ continue
+ if "volumes" in sp_info.keys() and ("criteria_volume_count" in sp_info.keys() or "criteria_reserve_free_capacity_pct" in sp_info.keys()):
+ raise AnsibleError("Incompatible parameters: You cannot specify both volumes with either criteria_volume_count or "
+ "criteria_reserve_free_capacity for any given eseries_storage_pool_configuration entry.")
+ if ("common_volume_configuration" in sp_info.keys() and isinstance(sp_info["common_volume_configuration"], dict) and
+ "size" in sp_info["common_volume_configuration"].keys() and "criteria_reserve_free_capacity_pct" in sp_info.keys()):
+ raise AnsibleError("Incompatible parameters: You cannot specify both size in common_volume_configuration with "
+ "criteria_reserve_free_capacity for any given eseries_storage_pool_configuration entry.")
+
+ if "volumes" not in sp_info.keys():
+ if "criteria_volume_count" in sp_info.keys():
+ if "common_volume_configuration" not in sp_info:
+ sp_info.update({"common_volume_configuration": {}})
+
+ reserve_free_capacity_pct = sp_info["criteria_reserve_free_capacity_pct"] if "criteria_reserve_free_capacity_pct" in sp_info.keys() else 0.0
+ volume_size = (100.0 - reserve_free_capacity_pct) / sp_info["criteria_volume_count"]
+ count_digits = len(str(sp_info["criteria_volume_count"]))
+
+ if "size" not in sp_info["common_volume_configuration"].keys():
+ sp_info["common_volume_configuration"].update({"size": volume_size, "size_unit": "pct"})
+ if "host" not in sp_info["common_volume_configuration"].keys() and "common_volume_host" in sp_info.keys():
+ sp_info["common_volume_configuration"].update({"host": sp_info["common_volume_host"]})
+
+ if (("eseries_remove_all_configuration_state" in inventory and inventory["eseries_remove_all_configuration_state"] == "absent") or
+ ("state" in sp_info and sp_info["state"] == "absent") or
+ ("state" not in sp_info and "eseries_volume_state" in inventory and inventory["eseries_volume_state"] == "absent")):
+ sp_info["common_volume_configuration"].update({"state": "absent"})
+ else:
+ sp_info["common_volume_configuration"].update({"state": "present"})
+
+ for count in range(sp_info["criteria_volume_count"]):
+ if "volumes" not in sp_info.keys():
+ sp_info.update({"volumes": []})
+ sp_info["volumes"].append({"name": "[pool]_%0*d" % (count_digits, count)})
+ else:
+ continue
+
+ elif not isinstance(sp_info["volumes"], list):
+ raise AnsibleError("Volumes must be a list")
+
+ for sp in patternize(sp_info["name"], inventory):
+ for vol_info in sp_info["volumes"]:
+
+ if not isinstance(vol_info, dict):
+ raise AnsibleError("Volume in the storage pool, %s, must be a dictionary." % sp_info["name"])
+
+ for vol in patternize(vol_info["name"], inventory, storage_pool=sp):
+ vol_options = dict()
+
+ # Add common_volume_configuration information
+ combined_volume_metadata = {}
+ if "common_volume_configuration" in sp_info:
+ for option, value in sp_info["common_volume_configuration"].items():
+ vol_options.update({option: value})
+ if "volume_metadata" in sp_info["common_volume_configuration"].keys():
+ combined_volume_metadata.update(sp_info["common_volume_configuration"]["volume_metadata"])
+
+ # Add/update volume specific information
+ for option, value in vol_info.items():
+ vol_options.update({option: value})
+ if "volume_metadata" in vol_info.keys():
+ combined_volume_metadata.update(vol_info["volume_metadata"])
+ vol_options.update({"volume_metadata": combined_volume_metadata})
+
+ if (("eseries_remove_all_configuration_state" in inventory and inventory["eseries_remove_all_configuration_state"] == "absent") or
+ ("state" in sp_info and sp_info["state"] == "absent") or
+ ("state" not in sp_info and "eseries_volume_state" in inventory and inventory["eseries_volume_state"] == "absent")):
+ vol_options.update({"state": "absent"})
+ else:
+ vol_options.update({"state": "present"})
+
+ vol_options.update({"name": vol, "storage_pool_name": sp})
+ vol_list.append(vol_options)
+ return vol_list
+
+
+def patternize(pattern, inventory, storage_pool=None):
+ """Generate list of strings determined by a pattern"""
+ if storage_pool:
+ pattern = pattern.replace("[pool]", storage_pool)
+
+ if inventory:
+ inventory_tokens = re.findall(r"\[[a-zA-Z0-9_]*\]", pattern)
+ for token in inventory_tokens:
+ pattern = pattern.replace(token, str(inventory[token[1:-1]]))
+
+ tokens = re.findall(r"\[[0-9]-[0-9]\]|\[[a-z]-[a-z]\]|\[[A-Z]-[A-Z]\]", pattern)
+ segments = "%s".join(re.split(r"\[[0-9]-[0-9]\]|\[[a-z]-[a-z]\]|\[[A-Z]-[A-Z]\]", pattern))
+
+ if len(tokens) == 0:
+ return [pattern]
+
+ combinations = []
+ for token in tokens:
+ start, stop = token[1:-1].split("-")
+
+ try:
+ start = int(start)
+ stop = int(stop)
+ combinations.append([str(number) for number in range(start, stop + 1)])
+ except ValueError:
+ combinations.append([chr(number) for number in range(ord(start), ord(stop) + 1)])
+
+ return [segments % subset for subset in list(product(*combinations))]
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py b/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py
new file mode 100644
index 000000000..b87e65955
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py
@@ -0,0 +1,746 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import os
+import random
+import mimetypes
+
+from pprint import pformat
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from netapp_lib.api.zapi import zapi
+ HAS_NETAPP_LIB = True
+except ImportError:
+ HAS_NETAPP_LIB = False
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+import ssl
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+POW2_BYTE_MAP = dict(
+ # Here, 1 kb = 1024
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+)
+
+try:
+ from solidfire.factory import ElementFactory
+ from solidfire.custom.models import TimeIntervalFrequency
+ from solidfire.models import Schedule, ScheduleInfo
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
+def has_netapp_lib():
+ return HAS_NETAPP_LIB
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def na_ontap_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True),
+ https=dict(required=False, type='bool', default=False),
+ validate_certs=dict(required=False, type='bool', default=True),
+ http_port=dict(required=False, type='int'),
+ ontapi=dict(required=False, type='int'),
+ use_rest=dict(required=False, type='str', default='Auto', choices=['Never', 'Always', 'Auto'])
+ )
+
+
+def ontap_sf_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True)
+ )
+
+
+def aws_cvs_host_argument_spec():
+
+ return dict(
+ api_url=dict(required=True, type='str'),
+ validate_certs=dict(required=False, type='bool', default=True),
+ api_key=dict(required=True, type='str'),
+ secret_key=dict(required=True, type='str')
+ )
+
+
+def create_sf_connection(module, port=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_SF_SDK and hostname and username and password:
+ try:
+ return_val = ElementFactory.create(hostname, username, password, port=port)
+ return return_val
+ except Exception:
+ raise Exception("Unable to create SF connection")
+ else:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+
+def setup_na_ontap_zapi(module, vserver=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ https = module.params['https']
+ validate_certs = module.params['validate_certs']
+ port = module.params['http_port']
+ version = module.params['ontapi']
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ if version:
+ minor = version
+ else:
+ minor = 110
+ server.set_api_version(major=1, minor=minor)
+ # default is HTTP
+ if https:
+ if port is None:
+ port = 443
+ transport_type = 'HTTPS'
+ # HACK to bypass certificate verification
+ if validate_certs is False:
+ if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ else:
+ if port is None:
+ port = 80
+ transport_type = 'HTTP'
+ server.set_transport_type(transport_type)
+ server.set_port(port)
+ server.set_server_type('FILER')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def setup_ontap_zapi(module, vserver=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ # Todo : Replace hard-coded values with configurable parameters.
+ server.set_api_version(major=1, minor=110)
+ server.set_port(80)
+ server.set_server_type('FILER')
+ server.set_transport_type('HTTP')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def eseries_host_argument_spec():
+ """Retrieve a base argument specification common to all NetApp E-Series modules"""
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=False, default='1'),
+ validate_certs=dict(type='bool', required=False, default=True)
+ ))
+ return argument_spec
+
+
+class NetAppESeriesModule(object):
+ """Base class for all NetApp E-Series modules.
+
+ Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
+ verification, http requests, secure http redirection for embedded web services, and logging setup.
+
+ Be sure to add the following lines in the module's documentation section:
+ extends_documentation_fragment:
+ - netapp.eseries
+
+ :param dict(dict) ansible_options: dictionary of ansible option definitions
+ :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
+ :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
+ :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
+ :param list(list) required_if: list containing list(s) containing the option, the option value, and then
+ a list of required options. (optional)
+ :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
+ :param list(list) required_together: list containing list(s) of options that are required together. (optional)
+ :param bool log_requests: controls whether to log each request (default: True)
+ """
+ DEFAULT_TIMEOUT = 60
+ DEFAULT_SECURE_PORT = "8443"
+ DEFAULT_REST_API_PATH = "devmgr/v2/"
+ DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
+ DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
+ "netapp-client-type": "Ansible-%s" % ansible_version}
+ HTTP_AGENT = "Ansible / %s" % ansible_version
+ SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
+ pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)
+
+ def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
+ mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
+ log_requests=True):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(ansible_options)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive, required_if=required_if,
+ required_one_of=required_one_of, required_together=required_together)
+
+ args = self.module.params
+ self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.log_requests = log_requests
+ self.creds = dict(url_username=args["api_username"],
+ url_password=args["api_password"],
+ validate_certs=args["validate_certs"])
+
+ if not self.url.endswith("/"):
+ self.url += "/"
+
+ self.is_embedded_mode = None
+ self.is_web_services_valid_cache = None
+
+ def _check_web_services_version(self):
+ """Verify proxy or embedded web services meets minimum version required for module.
+
+ The minimum required web services version is evaluated against the version supplied through the web services rest
+ api. An AnsibleFailJson exception will be raised when the minimum version requirement is not met.
+
+ This helper function will update the supplied api url if secure http is not used for embedded web services
+
+ :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
+ """
+ if not self.is_web_services_valid_cache:
+
+ url_parts = urlparse(self.url)
+ if not url_parts.scheme or not url_parts.netloc:
+ self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+ if url_parts.scheme not in ["http", "https"]:
+ self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+ self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)
+
+ if rc != 200:
+ self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+ self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ major, minor, other, revision = data["version"].split(".")
+ minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")
+
+ if not (major > minimum_major or
+ (major == minimum_major and minor > minimum_minor) or
+ (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+ self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
+ " Version required: [%s]." % (data["version"], self.web_services_version))
+
+ self.module.log("Web services rest api version met the minimum required version.")
+ self.is_web_services_valid_cache = True
+
+ def is_embedded(self):
+ """Determine whether web services server is the embedded web services.
+
+ If web services about endpoint fails based on an URLError then the request will be attempted again using
+ secure http.
+
+ :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
+ :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
+ """
+ self._check_web_services_version()
+
+ if self.is_embedded_mode is None:
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ self.is_embedded_mode = not data["runningAsProxy"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return self.is_embedded_mode
+
+ def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+ :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
+ full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
+ :param data: data required for the request (data may be json or any python structured data)
+ :param str method: request method such as GET, POST, DELETE.
+ :param dict headers: dictionary containing request headers.
+ :param bool ignore_errors: forces the request to ignore any raised exceptions.
+ """
+ self._check_web_services_version()
+
+ if headers is None:
+ headers = self.DEFAULT_HEADERS
+
+ if not isinstance(data, str) and headers["Content-Type"] == "application/json":
+ data = json.dumps(data)
+
+ if path.startswith("/"):
+ path = path[1:]
+ request_url = self.url + self.DEFAULT_REST_API_PATH + path
+
+ if self.log_requests:
+ self.module.log(pformat(dict(url=request_url, data=data, method=method)))
+
+ return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
+ timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
+
+
+def create_multipart_formdata(files, fields=None, send_8kb=False):
+ """Create the data for a multipart/form request.
+
+ :param list(list) files: list of lists each containing (name, filename, path).
+ :param list(list) fields: list of lists each containing (key, value).
+ :param bool send_8kb: only sends the first 8kb of the files (default: False).
+ """
+ boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
+ data_parts = list()
+ data = None
+
+ if six.PY2: # Generate payload for Python 2
+ newline = "\r\n"
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"' % key,
+ "",
+ value])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+ "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
+ "",
+ value])
+ data_parts.extend(["--%s--" % boundary, ""])
+ data = newline.join(data_parts)
+
+ else:
+ newline = six.b("\r\n")
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"' % key),
+ six.b(""),
+ six.b(value)])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+ six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
+ six.b(""),
+ value])
+ data_parts.extend([six.b("--%s--" % boundary), b""])
+ data = newline.join(data_parts)
+
+ headers = {
+ "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+ "Content-Length": str(len(data))}
+
+ return headers, data
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response."""
+
+ if headers is None:
+ headers = {"Content-Type": "application/json", "Accept": "application/json"}
+ headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+ if not http_agent:
+ http_agent = "Ansible / %s" % ansible_version
+
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
+ category="Information", event="setup", autosupport="false"):
+ ems_log = zapi.NaElement('ems-autosupport-log')
+ # Host name invoking the API.
+ ems_log.add_new_child("computer-name", name)
+ # ID of event. A user defined event-id, range [0..2^32-2].
+ ems_log.add_new_child("event-id", id)
+ # Name of the application invoking the API.
+ ems_log.add_new_child("event-source", source)
+ # Version of application invoking the API.
+ ems_log.add_new_child("app-version", version)
+ # Application defined category of the event.
+ ems_log.add_new_child("category", category)
+ # Description of event to log. An application defined message to log.
+ ems_log.add_new_child("event-description", event)
+ ems_log.add_new_child("log-level", "6")
+ ems_log.add_new_child("auto-support", autosupport)
+ server.invoke_successfully(ems_log, True)
+
+
+def get_cserver_zapi(server):
+ vserver_info = zapi.NaElement('vserver-get-iter')
+ query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
+ query = zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+ result = server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+ attribute_list = result.get_child_by_name('attributes-list')
+ vserver_list = attribute_list.get_child_by_name('vserver-info')
+ return vserver_list.get_child_content('vserver-name')
+
+
+def get_cserver(connection, is_rest=False):
+ if not is_rest:
+ return get_cserver_zapi(connection)
+
+ params = {'fields': 'type'}
+ api = "private/cli/vserver"
+ json, error = connection.get(api, params)
+ if json is None or error is not None:
+ # exit if there is an error or no data
+ return None
+ vservers = json.get('records')
+ if vservers is not None:
+ for vserver in vservers:
+ if vserver['type'] == 'admin': # cluster admin
+ return vserver['vserver']
+ if len(vservers) == 1: # assume vserver admin
+ return vservers[0]['vserver']
+
+ return None
+
+
+class OntapRestAPI(object):
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.hostname = self.module.params['hostname']
+ self.use_rest = self.module.params['use_rest']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.hostname + '/api/'
+ self.errors = list()
+ self.debug_logs = list()
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None, return_status_code=False):
+ ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+ except ValueError:
+ return None, None
+ error = json.get('error')
+ return json, error
+
+ try:
+ response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
+ content = response.content # for debug purposes
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ response.raise_for_status()
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ self.log_error(status_code, 'HTTP error: %s' % err)
+ error_details = str(err)
+ # If an error was reported in the json payload, it is handled below
+ except requests.exceptions.ConnectionError as err:
+ self.log_error(status_code, 'Connection error: %s' % err)
+ error_details = str(err)
+ except Exception as err:
+ self.log_error(status_code, 'Other error: %s' % err)
+ error_details = str(err)
+ if json_error is not None:
+ self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
+ error_details = json_error
+ self.log_debug(status_code, content)
+ if return_status_code:
+ return status_code, error_details
+ return json_dict, error_details
+
+ def get(self, api, params):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def _is_rest(self, used_unsupported_rest_properties=None):
+ if self.use_rest == "Always":
+ if used_unsupported_rest_properties:
+ error = "REST API currently does not support '%s'" % \
+ ', '.join(used_unsupported_rest_properties)
+ return True, error
+ else:
+ return True, None
+ if self.use_rest == 'Never' or used_unsupported_rest_properties:
+ # force ZAPI if requested or if some parameter requires it
+ return False, None
+ method = 'HEAD'
+ api = 'cluster/software'
+ status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
+ if status_code == 200:
+ return True, None
+ return False, None
+
+ def is_rest(self, used_unsupported_rest_properties=None):
+ ''' only return error if there is a reason to '''
+ use_rest, error = self._is_rest(used_unsupported_rest_properties)
+ if used_unsupported_rest_properties is None:
+ return use_rest
+ return use_rest, error
+
+ def log_error(self, status_code, message):
+ self.errors.append(message)
+ self.debug_logs.append((status_code, message))
+
+ def log_debug(self, status_code, content):
+ self.debug_logs.append((status_code, content))
+
+
+class AwsCvsRestAPI(object):
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.api_key = self.module.params['api_key']
+ self.secret_key = self.module.params['secret_key']
+ self.api_url = self.module.params['api_url']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.api_url + '/v1/'
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None):
+ ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+ headers = {
+ 'Content-type': "application/json",
+ 'api-key': self.api_key,
+ 'secret-key': self.secret_key,
+ 'Cache-Control': "no-cache",
+ }
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+
+ except ValueError:
+ return None, None
+ success_code = [200, 201, 202]
+ if response.status_code not in success_code:
+ error = json.get('message')
+ else:
+ error = None
+ return json, error
+ try:
+ response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ error_details = str(err)
+ # If an error was reported in the json payload, it is handled below
+ except requests.exceptions.ConnectionError as err:
+ error_details = str(err)
+ except Exception as err:
+ error_details = str(err)
+ if json_error is not None:
+ error_details = json_error
+
+ return json_dict, error_details
+
+ def get(self, api, params=None):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def put(self, api, data, params=None):
+ method = 'PUT'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def get_state(self, jobId):
+ """ Method to get the state of the job """
+ method = 'GET'
+ response, status_code = self.get('Jobs/%s' % jobId)
+ while str(response['state']) != 'done':
+ response, status_code = self.get('Jobs/%s' % jobId)
+ return 'done'
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py b/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py
new file mode 100644
index 000000000..42111d98b
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py
@@ -0,0 +1,465 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import random
+import mimetypes
+
+from pprint import pformat
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils._text import to_native
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+def eseries_host_argument_spec():
+ """Retrieve a base argument specification common to all NetApp E-Series modules"""
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type="str", required=True),
+ api_password=dict(type="str", required=True, no_log=True),
+ api_url=dict(type="str", required=True),
+ ssid=dict(type="str", required=False, default="1"),
+ validate_certs=dict(type="bool", required=False, default=True)
+ ))
+ return argument_spec
+
+
+def eseries_proxy_argument_spec():
+ """Retrieve a base argument specification common to all NetApp E-Series modules for proxy specific tasks"""
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type="str", required=True),
+ api_password=dict(type="str", required=True, no_log=True),
+ api_url=dict(type="str", required=True),
+ validate_certs=dict(type="bool", required=False, default=True)
+ ))
+ return argument_spec
+
+
+class NetAppESeriesModule(object):
+ """Base class for all NetApp E-Series modules.
+
+ Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
+ verification, http requests, secure http redirection for embedded web services, and logging setup.
+
+ Be sure to add the following lines in the module's documentation section:
+ extends_documentation_fragment:
+ - santricity
+
+ :param dict(dict) ansible_options: dictionary of ansible option definitions
+ :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
+ :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
+ :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
+ :param list(list) required_if: list containing list(s) containing the option, the option value, and then a list of required options. (optional)
+ :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
+ :param list(list) required_together: list containing list(s) of options that are required together. (optional)
+ :param bool log_requests: controls whether to log each request (default: True)
+ :param bool proxy_specific_task: controls whether ssid is a default option (default: False)
+ """
+ DEFAULT_TIMEOUT = 300
+ DEFAULT_SECURE_PORT = "8443"
+ DEFAULT_BASE_PATH = "devmgr/"
+ DEFAULT_REST_API_PATH = "devmgr/v2/"
+ DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
+ DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
+ "netapp-client-type": "Ansible-%s" % ansible_version}
+ HTTP_AGENT = "Ansible / %s" % ansible_version
+ SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
+ pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)
+
+ HOST_TYPE_INDEXES = {"aix mpio": 9, "avt 4m": 5, "hp-ux": 15, "linux atto": 24, "linux dm-mp": 28, "linux pathmanager": 25, "solaris 10 or earlier": 2,
+ "solaris 11 or later": 17, "svc": 18, "ontap": 26, "mac": 22, "vmware": 10, "windows": 1, "windows atto": 23, "windows clustered": 8}
+
+ def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
+ mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
+ log_requests=True, proxy_specific_task=False):
+
+ if proxy_specific_task:
+ argument_spec = eseries_proxy_argument_spec()
+ else:
+ argument_spec = eseries_host_argument_spec()
+
+ argument_spec.update(ansible_options)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive, required_if=required_if,
+ required_one_of=required_one_of, required_together=required_together)
+
+ args = self.module.params
+ self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
+
+ if proxy_specific_task:
+ self.ssid = "0"
+ else:
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.log_requests = log_requests
+ self.creds = dict(url_username=args["api_username"],
+ url_password=args["api_password"],
+ validate_certs=args["validate_certs"])
+
+ if not self.url.endswith("/"):
+ self.url += "/"
+
+ self.is_proxy_used_cache = None
+ self.is_embedded_available_cache = None
+ self.is_web_services_valid_cache = None
+
+ def _check_ssid(self):
+ """Verify storage system identifier exist on the proxy and, if not, then update to match storage system name."""
+ try:
+ rc, data = self._request(url=self.url + self.DEFAULT_REST_API_ABOUT_PATH, **self.creds)
+
+ if data["runningAsProxy"]:
+ if self.ssid.lower() not in ["proxy", "0"]:
+ try:
+ rc, systems = self._request(url=self.url + self.DEFAULT_REST_API_PATH + "storage-systems", **self.creds)
+ alternates = []
+ for system in systems:
+ if system["id"] == self.ssid:
+ break
+ elif system["name"] == self.ssid:
+ alternates.append(system["id"])
+ else:
+ if len(alternates) == 1:
+ self.module.warn("Array Id does not exist on Web Services Proxy Instance! However, there is a storage system with a"
+ " matching name. Updating Identifier. Array Name: [%s], Array Id [%s]." % (self.ssid, alternates[0]))
+ self.ssid = alternates[0]
+ else:
+ self.module.fail_json(msg="Array identifier does not exist on Web Services Proxy Instance! Array ID [%s]." % self.ssid)
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine Web Services Proxy storage systems! Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+ except Exception as error:
+ # Don't fail here; if the ssid is wrong, it will fail on the next request. Failing here causes issues for the na_santricity_auth module.
+ pass
+
+ def _check_web_services_version(self):
+ """Verify proxy or embedded web services meets minimum version required for module.
+
+ The minimum required web services version is evaluated against the version supplied through the web services rest
+ api. An AnsibleFailJson exception will be raised when the minimum version requirement is not met.
+
+ This helper function will update the supplied api url if secure http is not used for embedded web services
+
+ :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
+ """
+ if not self.is_web_services_valid_cache:
+
+ url_parts = urlparse(self.url)
+ if not url_parts.scheme or not url_parts.netloc:
+ self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+ if url_parts.scheme not in ["http", "https"]:
+ self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+ self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, force_basic_auth=False, **self.creds)
+
+ if rc != 200:
+ self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+ self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ if len(data["version"].split(".")) == 4:
+ major, minor, other, revision = data["version"].split(".")
+ minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")
+
+ if not (major > minimum_major or
+ (major == minimum_major and minor > minimum_minor) or
+ (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+ self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
+ " Version required: [%s]." % (data["version"], self.web_services_version))
+ self.module.log("Web services rest api version met the minimum required version.")
+ else:
+ self.module.warn("Web services rest api version unknown!")
+
+ self._check_ssid()
+ self.is_web_services_valid_cache = True
+
+ def is_web_services_version_met(self, version):
+ """Determines whether a particular web services version has been satisfied."""
+ split_version = version.split(".")
+ if len(split_version) != 4 or not split_version[0].isdigit() or not split_version[1].isdigit() or not split_version[3].isdigit():
+ self.module.fail_json(msg="Version is not a valid Web Services version. Version [%s]." % version)
+
+ url_parts = urlparse(self.url)
+ if not url_parts.scheme or not url_parts.netloc:
+ self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+ if url_parts.scheme not in ["http", "https"]:
+ self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+ self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)
+
+ if rc != 200:
+ self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+ self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ if len(data["version"].split(".")) == 4:
+ major, minor, other, revision = data["version"].split(".")
+ minimum_major, minimum_minor, other, minimum_revision = split_version
+ if not (major > minimum_major or
+ (major == minimum_major and minor > minimum_minor) or
+ (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+ return False
+ else:
+ return False
+ return True
+
+ def is_embedded_available(self):
+ """Determine whether the storage array has embedded services available."""
+ self._check_web_services_version()
+
+ if self.is_embedded_available_cache is None:
+
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ self.is_embedded_available_cache = False
+ else:
+ try:
+ rc, bundle = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/extendedSAData/codeVersions[codeModule='bundle']"
+ % self.ssid)
+ self.is_embedded_available_cache = False
+ if bundle:
+ self.is_embedded_available_cache = True
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve information about storage system [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ else: # Contacted using embedded web services
+ self.is_embedded_available_cache = True
+
+ self.module.log("embedded_available: [%s]" % ("True" if self.is_embedded_available_cache else "False"))
+ return self.is_embedded_available_cache
+
+ def is_embedded(self):
+ """Determine whether web services server is the embedded web services."""
+ return not self.is_proxy()
+
+ def is_proxy(self):
+ """Determine whether web services server is the proxy web services.
+
+ :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
+ :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
+ """
+ self._check_web_services_version()
+
+ if self.is_proxy_used_cache is None:
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, force_basic_auth=False, **self.creds)
+ self.is_proxy_used_cache = data["runningAsProxy"]
+
+ self.module.log("proxy: [%s]" % ("True" if self.is_proxy_used_cache else "False"))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return self.is_proxy_used_cache
+
+ def request(self, path, rest_api_path=DEFAULT_REST_API_PATH, rest_api_url=None, data=None, method='GET', headers=None, ignore_errors=False, timeout=None,
+ force_basic_auth=True, log_request=None, json_response=True):
+ """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+ :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
+ full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
+ :param str rest_api_path: override the class DEFAULT_REST_API_PATH which is used to build the request URL.
+ :param str rest_api_url: override the class url member which contains the base url for web services.
+ :param data: data required for the request (data may be json or any python structured data)
+ :param str method: request method such as GET, POST, DELETE.
+ :param dict headers: dictionary containing request headers.
+ :param bool ignore_errors: forces the request to ignore any raised exceptions.
+ :param int timeout: duration of seconds before request finally times out.
+ :param bool force_basic_auth: Ensure that basic authentication is being used.
+ :param bool log_request: Log the request and response
+ :param bool json_response: Whether the response should be loaded as JSON, otherwise the response is return raw.
+ """
+ self._check_web_services_version()
+
+ if rest_api_url is None:
+ rest_api_url = self.url
+ if headers is None:
+ headers = self.DEFAULT_HEADERS
+ if timeout is None:
+ timeout = self.DEFAULT_TIMEOUT
+ if log_request is None:
+ log_request = self.log_requests
+
+ if not isinstance(data, str) and "Content-Type" in headers and headers["Content-Type"] == "application/json":
+ data = json.dumps(data)
+
+ if path.startswith("/"):
+ path = path[1:]
+ request_url = rest_api_url + rest_api_path + path
+
+ if log_request:
+ self.module.log(pformat(dict(url=request_url, data=data, method=method, headers=headers)))
+
+ response = self._request(url=request_url, data=data, method=method, headers=headers, last_mod_time=None, timeout=timeout, http_agent=self.HTTP_AGENT,
+ force_basic_auth=force_basic_auth, ignore_errors=ignore_errors, json_response=json_response, **self.creds)
+ if log_request:
+ self.module.log(pformat(response))
+
+ return response
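+
+ # Illustrative usage (a sketch, not part of the collection): a module derived from
+ # NetAppESeriesModule might call request() roughly like the hypothetical lines below;
+ # the endpoint paths are examples only.
+ #
+ # rc, graph = self.request("storage-systems/%s/graph" % self.ssid)
+ # rc, resp = self.request("storage-systems/%s/device-alerts" % self.ssid, method="POST", data={"alertingEnabled": False})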
+
+ @staticmethod
+ def _request(url, data=None, headers=None, method='GET', use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False, json_response=True):
+ """Issue an HTTP request to a url, retrieving an optional JSON response."""
+
+ if headers is None:
+ headers = {"Content-Type": "application/json", "Accept": "application/json"}
+ headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+ if not http_agent:
+ http_agent = "Ansible / %s" % ansible_version
+
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
+ validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ rc = r.getcode()
+ response = r.read()
+ if json_response and response:
+ response = json.loads(response)
+
+ except HTTPError as error:
+ rc = error.code
+ response = error.fp.read()
+ try:
+ if json_response:
+ response = json.loads(response)
+ except Exception:
+ pass
+
+ if not ignore_errors:
+ raise Exception(rc, response)
+ except ValueError as error:
+ pass
+
+ return rc, response
+
+
+def create_multipart_formdata(files, fields=None, send_8kb=False):
+ """Create the data for a multipart/form request.
+
+ :param list(list) files: list of lists each containing (name, filename, path).
+ :param list(list) fields: list of lists each containing (key, value).
+ :param bool send_8kb: only sends the first 8kb of the files (default: False).
+ """
+ boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
+ data_parts = list()
+ data = None
+
+ if six.PY2: # Generate payload for Python 2
+ newline = "\r\n"
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"' % key,
+ "",
+ value])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+ "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
+ "",
+ value])
+ data_parts.extend(["--%s--" % boundary, ""])
+ data = newline.join(data_parts)
+
+ else:
+ newline = six.b("\r\n")
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"' % key),
+ six.b(""),
+ six.b(value)])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+ six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
+ six.b(""),
+ value])
+ data_parts.extend([six.b("--%s--" % boundary), b""])
+ data = newline.join(data_parts)
+
+ headers = {
+ "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+ "Content-Length": str(len(data))}
+
+ return headers, data
+
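+# Illustrative sketch (assumption, not taken from the collection): create_multipart_formdata()
+# is typically paired with NetAppESeriesModule.request() to upload a file; the endpoint path
+# and file names below are hypothetical.
+#
+# headers, data = create_multipart_formdata(files=[("firmwareFile", "cfw.dlp", "/tmp/cfw.dlp")])
+# rc, resp = module.request("firmware/upload", method="POST", headers=headers, data=data)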
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response."""
+
+ if headers is None:
+ headers = {"Content-Type": "application/json", "Accept": "application/json"}
+ headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+ if not http_agent:
+ http_agent = "Ansible / %s" % ansible_version
+
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
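+
+# Illustrative usage (a sketch, not part of the collection): the legacy module-level
+# request() helper above expects an absolute URL and explicit credentials, for example:
+#
+# rc, about = request("https://192.168.1.100:8443/devmgr/utils/about", url_username="admin", url_password="adminpass", validate_certs=False)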
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py
new file mode 100644
index 000000000..2c105b773
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_alerts
+short_description: NetApp E-Series manage email notification settings
+description:
+ - Certain E-Series systems have the capability to send email notifications on potentially critical events.
+ - This module will allow the owner of the system to specify email recipients for these messages.
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Enable/disable the sending of email-based alerts.
+ type: str
+ default: enabled
+ required: false
+ choices:
+ - enabled
+ - disabled
+ server:
+ description:
+ - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
+ - To use a fully qualified domain name, you must configure a DNS server on both controllers using
+ M(na_santricity_mgmt_interface).
+ - Required when I(state=enabled).
+ type: str
+ required: false
+ sender:
+ description:
+ - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
+ - Required when I(state=enabled).
+ type: str
+ required: false
+ contact:
+ description:
+ - Allows the owner to specify some free-form contact information to be included in the emails.
+ - This is typically utilized to provide a contact phone number.
+ type: str
+ required: false
+ recipients:
+ description:
+ - The email addresses that will receive the email notifications.
+ - Required when I(state=enabled).
+ type: list
+ required: false
+ test:
+ description:
+ - When a change is detected in the configuration, a test email will be sent.
+ - This may take a few minutes to process.
+ - Only applicable if I(state=enabled).
+ type: bool
+ default: false
+notes:
+ - Check mode is supported.
+ - Alertable messages are a subset of messages shown by the Major Event Log (MEL) of the storage system. Examples
+ of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
+ events.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable email-based alerting
+ na_santricity_alerts:
+ state: enabled
+ sender: noreply@example.com
+ server: mail@example.com
+ contact: "Phone: 1-555-555-5555"
+ recipients:
+ - name1@example.com
+ - name2@example.com
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable alerting
+ na_santricity_alerts:
+ state: disabled
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesAlerts(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
+ server=dict(type='str', required=False),
+ sender=dict(type='str', required=False),
+ contact=dict(type='str', required=False),
+ recipients=dict(type='list', required=False),
+ test=dict(type='bool', required=False, default=False))
+
+ required_if = [['state', 'enabled', ['server', 'sender', 'recipients']]]
+ super(NetAppESeriesAlerts, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ required_if=required_if,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.alerts = args['state'] == 'enabled'
+ self.server = args['server']
+ self.sender = args['sender']
+ self.contact = args['contact']
+ self.recipients = args['recipients']
+ self.test = args['test']
+ self.check_mode = self.module.check_mode
+
+ # Very basic validation on email addresses: xx@yy.zz
+ email = re.compile(r"[^@]+@[^@]+\.[^@]+")
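+ # Illustrative note (not part of the original module): this pattern is intentionally
+ # permissive. For example, "user@example.com" matches, while "user@example" and
+ # "user.example.com" do not; it makes no attempt at full RFC 5322 validation.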
+
+ if self.sender and not email.match(self.sender):
+ self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)
+
+ if self.recipients is not None:
+ for recipient in self.recipients:
+ if not email.match(recipient):
+ self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)
+
+ if len(self.recipients) < 1:
+ self.module.fail_json(msg="At least one recipient address must be specified.")
+
+ def get_configuration(self):
+ """Retrieve the current storage system alert settings."""
+ if self.is_proxy():
+ if self.is_embedded_available():
+ try:
+ rc, result = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts" % self.ssid)
+ return result
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ else:
+ self.module.fail_json(msg="Setting SANtricity alerts is only available from SANtricity Web Services Proxy if the storage system has"
+ " SANtricity Web Services Embedded available. Array [%s]." % self.ssid)
+ else:
+ try:
+ rc, result = self.request("storage-systems/%s/device-alerts" % self.ssid)
+ return result
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update_configuration(self):
+ """Update the storage system alert settings."""
+ config = self.get_configuration()
+ update = False
+ body = dict()
+
+ if self.alerts:
+ body = dict(alertingEnabled=True)
+ if not config['alertingEnabled']:
+ update = True
+
+ body.update(emailServerAddress=self.server)
+ if config['emailServerAddress'] != self.server:
+ update = True
+
+ body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
+ if self.contact and (self.contact != config['additionalContactInformation']
+ or not config['sendAdditionalContactInformation']):
+ update = True
+
+ body.update(emailSenderAddress=self.sender)
+ if config['emailSenderAddress'] != self.sender:
+ update = True
+
+ self.recipients.sort()
+ if config['recipientEmailAddresses']:
+ config['recipientEmailAddresses'].sort()
+
+ body.update(recipientEmailAddresses=self.recipients)
+ if config['recipientEmailAddresses'] != self.recipients:
+ update = True
+
+ elif config['alertingEnabled']:
+ body = {"alertingEnabled": False, "emailServerAddress": "", "emailSenderAddress": "", "sendAdditionalContactInformation": False,
+ "additionalContactInformation": "", "recipientEmailAddresses": []}
+ update = True
+
+ if update and not self.check_mode:
+ if self.is_proxy() and self.is_embedded_available():
+ try:
+ rc, result = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts" % self.ssid, method="POST", data=body)
+ except Exception as err:
+ self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ else:
+ try:
+ rc, result = self.request("storage-systems/%s/device-alerts" % self.ssid, method="POST", data=body)
+ except Exception as err:
+ self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ return update
+
+ def send_test_email(self):
+ """Send a test email to verify that the provided configuration is valid and functional."""
+ if not self.check_mode:
+ if self.is_proxy() and self.is_embedded_available():
+ try:
+ rc, resp = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts/alert-email-test" % self.ssid, method="POST")
+ if resp['response'] != 'emailSentOK':
+ self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." % (resp['response'], self.ssid))
+ except Exception as err:
+ self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/device-alerts/alert-email-test" % self.ssid, method="POST")
+ if resp['response'] != 'emailSentOK':
+ self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." % (resp['response'], self.ssid))
+ except Exception as err:
+ self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update(self):
+ update = self.update_configuration()
+
+ if self.test and update:
+ self.send_test_email()
+
+ if self.alerts:
+ msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
+ else:
+ msg = 'Alerting has been disabled.'
+
+ self.module.exit_json(msg=msg, changed=update)
+
+
+def main():
+ alerts = NetAppESeriesAlerts()
+ alerts.update()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py
new file mode 100644
index 000000000..9a50dea0c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_alerts_syslog
+short_description: NetApp E-Series manage syslog servers receiving storage system alerts.
+description:
+ - Manage the list of syslog servers that will receive notifications on potentially critical events.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ servers:
+ description:
+ - List of dictionaries where each dictionary contains a syslog server entry.
+ type: list
+ required: False
+ suboptions:
+ address:
+ description:
+ - Syslog server address can be a fully qualified domain name, IPv4 address, or IPv6 address.
+ required: true
+ port:
+ description:
+ - UDP Port must be a numerical value between 0 and 65535. Typically, the UDP Port for syslog is 514.
+ required: false
+ default: 514
+ test:
+ description:
+ - This forces a test syslog message to be sent to the stated syslog server.
+ - Test will only be issued when a change is made.
+ type: bool
+ default: false
+notes:
+ - Check mode is supported.
+ - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+ SANtricity OS 11.40.2) and higher.
+"""
+
+EXAMPLES = """
+ - name: Add two syslog server configurations to NetApp E-Series storage array.
+ na_santricity_alerts_syslog:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ servers:
+ - address: "192.168.1.100"
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesAlertsSyslog(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(servers=dict(type="list", required=False),
+ test=dict(type="bool", default=False, require=False))
+
+ required_if = [["state", "present", ["address"]]]
+ mutually_exclusive = [["test", "absent"]]
+ super(NetAppESeriesAlertsSyslog, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
+ args = self.module.params
+ if args["servers"] and len(args["servers"]) > 5:
+ self.module.fail_json(msg="Maximum number of syslog servers is 5! Array Id [%s]." % self.ssid)
+
+ self.servers = {}
+ if args["servers"] is not None:
+ for server in args["servers"]:
+ port = 514
+ if "port" in server:
+ port = server["port"]
+ self.servers.update({server["address"]: port})
+
+ self.test = args["test"]
+ self.check_mode = self.module.check_mode
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
+ def get_current_configuration(self):
+ """Retrieve existing alert-syslog configuration."""
+ try:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog" % ("1" if self.url_path_prefix else self.ssid))
+ return result
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def is_change_required(self):
+ """Determine whether changes are required."""
+ current_config = self.get_current_configuration()
+
+ # When syslog servers should exist, search for them.
+ if self.servers:
+ for entry in current_config["syslogReceivers"]:
+ if entry["serverName"] not in self.servers.keys() or entry["portNumber"] != self.servers[entry["serverName"]]:
+ return True
+
+ for server, port in self.servers.items():
+ for entry in current_config["syslogReceivers"]:
+ if server == entry["serverName"] and port == entry["portNumber"]:
+ break
+ else:
+ return True
+ return False
+
+ elif current_config["syslogReceivers"]:
+ return True
+
+ return False
+
+ def make_request_body(self):
+ """Generate the request body."""
+ body = {"syslogReceivers": [], "defaultFacility": 3, "defaultTag": "StorageArray"}
+
+ for server, port in self.servers.items():
+ body["syslogReceivers"].append({"serverName": server, "portNumber": port})
+
+ return body
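+
+ # Illustrative example of the body produced above (not part of the module): for
+ # servers {"192.168.1.100": 514} the request body would be
+ # {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}],
+ # "defaultFacility": 3, "defaultTag": "StorageArray"}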
+
+ def test_configuration(self):
+ """Send syslog test message to all systems (only option)."""
+ try:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog-test"
+ % ("1" if self.url_path_prefix else self.ssid), method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to send test message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update(self):
+ """Update configuration and respond to ansible."""
+ change_required = self.is_change_required()
+
+ if change_required and not self.check_mode:
+ try:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog" % ("1" if self.url_path_prefix else self.ssid),
+ method="POST", data=self.make_request_body())
+ except Exception as error:
+ self.module.fail_json(msg="Failed to add syslog server! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ if self.test and self.servers:
+ self.test_configuration()
+
+ self.module.exit_json(msg="The syslog settings have been updated.", changed=change_required)
+
+
+def main():
+ settings = NetAppESeriesAlertsSyslog()
+ settings.update()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py
new file mode 100644
index 000000000..8d6a33620
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_asup
+short_description: NetApp E-Series manage auto-support settings
+description:
+ - Allow the auto-support settings to be configured for an individual E-Series storage-system
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Enable/disable the E-Series auto-support configuration or maintenance mode.
+ - When this option is enabled, configuration, logs, and other support-related information will be relayed
+ to NetApp to help better support your system. No personally identifiable information, passwords, etc., will
+ be collected.
+ - The maintenance state enables the maintenance window which allows maintenance activities to be performed on the storage array without
+ generating support cases.
+ - Maintenance mode cannot be enabled unless ASUP has previously been enabled.
+ type: str
+ default: enabled
+ choices:
+ - enabled
+ - disabled
+ - maintenance_enabled
+ - maintenance_disabled
+ active:
+ description:
+ - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
+ possible that the bundle did not contain all of the required information at the time of the event.
+ Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
+ of support data in order to resolve the problem.
+ - Only applicable if I(state=enabled).
+ default: true
+ type: bool
+ start:
+ description:
+ - A start hour may be specified in a range from 0 to 23 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ type: int
+ default: 0
+ end:
+ description:
+ - An end hour may be specified in a range from 1 to 24 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ type: int
+ default: 24
+ days:
+ description:
+ - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
+ of the provided days.
+ type: list
+ choices:
+ - monday
+ - tuesday
+ - wednesday
+ - thursday
+ - friday
+ - saturday
+ - sunday
+ required: false
+ aliases:
+ - schedule_days
+ - days_of_week
+ method:
+ description:
+ - AutoSupport dispatch delivery method.
+ choices:
+ - https
+ - http
+ - email
+ type: str
+ required: false
+ default: https
+ routing_type:
+ description:
+ - AutoSupport routing
+ - Required when M(method==https or method==http).
+ choices:
+ - direct
+ - proxy
+ - script
+ type: str
+ default: direct
+ required: false
+ proxy:
+ description:
+ - Information particular to the proxy delivery method.
+ - Required when M((method==https or method==http) and routing_type==proxy).
+ type: dict
+ required: false
+ suboptions:
+ host:
+ description:
+ - Proxy host IP address or fully qualified domain name.
+ - Required when M(method==http or method==https) and M(routing_type==proxy).
+ type: str
+ required: false
+ port:
+ description:
+ - Proxy host port.
+ - Required when M(method==http or method==https) and M(routing_type==proxy).
+ type: int
+ required: false
+ script:
+ description:
+ - Path to the AutoSupport routing script file.
+ - Required when M(method==http or method==https) and M(routing_type==script).
+ type: str
+ required: false
+ username:
+ description:
+ - Username for the proxy.
+ type: str
+ required: false
+ password:
+ description:
+ - Password for the proxy.
+ type: str
+ required: false
+ email:
+ description:
+ - Information particular to the e-mail delivery method.
+ - Uses the SMTP protocol.
+ - Required when M(method==email).
+ type: dict
+ required: false
+ suboptions:
+ server:
+ description:
+ - Mail server's IP address or fully qualified domain name.
+ - Required when M(routing_type==email).
+ type: str
+ required: false
+ sender:
+ description:
+ - Sender's email account
+ - Required when M(routing_type==email).
+ type: str
+ required: false
+ test_recipient:
+ description:
+ - Test verification email
+ - Required when M(routing_type==email).
+ type: str
+ required: false
+ maintenance_duration:
+ description:
+ - The duration of time the ASUP maintenance mode will be active.
+ - The permissible range is between 1 and 72 hours, inclusive.
+ - Required when I(state==maintenance_enabled).
+ type: int
+ default: 24
+ required: false
+ maintenance_emails:
+ description:
+ - List of email addresses for maintenance notifications.
+ - Required when I(state==maintenance_enabled).
+ type: list
+ required: false
+ validate:
+ description:
+ - Validate ASUP configuration.
+ type: bool
+ default: false
+ required: false
+notes:
+ - Check mode is supported.
+ - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
+ respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
+ disabled if desired.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable ASUP and allow pro-active retrieval of bundles
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: enabled
+ active: true
+ days: ["saturday", "sunday"]
+ start: 17
+ end: 20
+ - name: Disable ASUP
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: disabled
+ - name: Enable the ASUP maintenance window for 24 hours
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ state: maintenance_enabled
+ maintenance_duration: 24
+ maintenance_emails:
+ - admin@example.com
+ - name: Disable the ASUP maintenance window
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: maintenance_disabled
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+asup:
+ description:
+ - True if ASUP is enabled.
+ returned: on success
+ sample: true
+ type: bool
+active:
+ description:
+ - True if the active option has been enabled.
+ returned: on success
+ sample: true
+ type: bool
+cfg:
+ description:
+ - Provide the full ASUP configuration.
+ returned: on success
+ type: complex
+ contains:
+ asupEnabled:
+ description:
+ - True if ASUP has been enabled.
+ type: bool
+ onDemandEnabled:
+ description:
+ - True if ASUP active monitoring has been enabled.
+ type: bool
+ daysOfWeek:
+ description:
+ - The days of the week that ASUP bundles will be sent.
+ type: list
+"""
+import time
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesAsup(NetAppESeriesModule):
+ DAYS_OPTIONS = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"]
+
+ def __init__(self):
+
+ ansible_options = dict(
+ state=dict(type="str", required=False, default="enabled", choices=["enabled", "disabled", "maintenance_enabled", "maintenance_disabled"]),
+ active=dict(type="bool", required=False, default=True),
+ days=dict(type="list", required=False, aliases=["schedule_days", "days_of_week"], choices=self.DAYS_OPTIONS),
+ start=dict(type="int", required=False, default=0),
+ end=dict(type="int", required=False, default=24),
+ method=dict(type="str", required=False, choices=["https", "http", "email"], default="https"),
+ routing_type=dict(type="str", required=False, choices=["direct", "proxy", "script"], default="direct"),
+ proxy=dict(type="dict", required=False, options=dict(host=dict(type="str", required=False),
+ port=dict(type="int", required=False),
+ script=dict(type="str", required=False),
+ username=dict(type="str", required=False),
+ password=dict(type="str", no_log=True, required=False))),
+ email=dict(type="dict", required=False, options=dict(server=dict(type="str", required=False),
+ sender=dict(type="str", required=False),
+ test_recipient=dict(type="str", required=False))),
+ maintenance_duration=dict(type="int", required=False, default=24),
+ maintenance_emails=dict(type="list", required=False),
+ validate=dict(type="bool", require=False, default=False))
+
+ mutually_exclusive = [["host", "script"],
+ ["port", "script"]]
+
+ required_if = [["method", "https", ["routing_type"]],
+ ["method", "http", ["routing_type"]],
+ ["method", "email", ["email"]],
+ ["state", "maintenance_enabled", ["maintenance_duration", "maintenance_emails"]]]
+
+ super(NetAppESeriesAsup, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.active = args["active"]
+ self.days = args["days"]
+ self.start = args["start"]
+ self.end = args["end"]
+
+ self.method = args["method"]
+ self.routing_type = args["routing_type"] if args["routing_type"] else "none"
+ self.proxy = args["proxy"]
+ self.email = args["email"]
+ self.maintenance_duration = args["maintenance_duration"]
+ self.maintenance_emails = args["maintenance_emails"]
+ self.validate = args["validate"]
+
+ if self.validate and self.email and "test_recipient" not in self.email.keys():
+ self.module.fail_json(msg="test_recipient must be provided for validating email delivery method. Array [%s]" % self.ssid)
+
+ self.check_mode = self.module.check_mode
+
+ if self.start >= self.end:
+ self.module.fail_json(msg="The value provided for the start time is invalid."
+ " It must be less than the end time.")
+ if self.start < 0 or self.start > 23:
+ self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
+ else:
+ self.start = self.start * 60
+ if self.end < 1 or self.end > 24:
+ self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
+ else:
+ self.end = min(self.end * 60, 1439)
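+
+ # For example (illustrative): start=17 and end=20 become 1020 and 1200 minutes past
+ # midnight UTC, and end=24 is clamped to 1439 so the window never exceeds 23:59.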
+
+ if self.maintenance_duration < 1 or self.maintenance_duration > 72:
+ self.module.fail_json(msg="The maintenance duration must be equal to or between 1 and 72 hours.")
+
+ if not self.days:
+ self.days = self.DAYS_OPTIONS
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
+ def get_configuration(self):
+ try:
+ rc, result = self.request(self.url_path_prefix + "device-asup")
+
+ if not (result["asupCapable"] and result["onDemandCapable"]):
+ self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % self.ssid)
+ return result
+
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def in_maintenance_mode(self):
+ """Determine whether storage device is currently in maintenance mode."""
+ results = False
+ try:
+ rc, key_values = self.request(self.url_path_prefix + "key-values")
+
+ for key_value in key_values:
+ if key_value["key"] == "ansible_asup_maintenance_email_list":
+ if not self.maintenance_emails:
+ self.maintenance_emails = key_value["value"].split(",")
+ elif key_value["key"] == "ansible_asup_maintenance_stop_time":
+ if time.time() < float(key_value["value"]):
+ results = True
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve maintenance windows information! Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return results
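+
+ # Illustrative example (an assumption, not output captured from an array): the key-values
+ # endpoint might return entries such as
+ # [{"key": "ansible_asup_maintenance_email_list", "value": "admin@example.com"},
+ # {"key": "ansible_asup_maintenance_stop_time", "value": "1672531200.0"}]
+ # in which case the method above reports maintenance mode as active until the stop time passes.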
+
+ def update_configuration(self):
+ config = self.get_configuration()
+ update = False
+ body = dict()
+
+ # Build request body
+ if self.state == "enabled":
+ body = dict(asupEnabled=True)
+ if not config["asupEnabled"]:
+ update = True
+
+ if (config["onDemandEnabled"] and config["remoteDiagsEnabled"]) != self.active:
+ update = True
+ body.update(dict(onDemandEnabled=self.active,
+ remoteDiagsEnabled=self.active))
+ self.days.sort()
+ config["schedule"]["daysOfWeek"].sort()
+
+ body["schedule"] = dict(daysOfWeek=self.days,
+ dailyMinTime=self.start,
+ dailyMaxTime=self.end,
+ weeklyMinTime=self.start,
+ weeklyMaxTime=self.end)
+
+ if self.days != config["schedule"]["daysOfWeek"]:
+ update = True
+ if self.start != config["schedule"]["dailyMinTime"] or self.start != config["schedule"]["weeklyMinTime"]:
+ update = True
+ elif self.end != config["schedule"]["dailyMaxTime"] or self.end != config["schedule"]["weeklyMaxTime"]:
+ update = True
+
+ if self.method in ["https", "http"]:
+ if self.routing_type == "direct":
+ body["delivery"] = dict(method=self.method,
+ routingType="direct")
+ elif self.routing_type == "proxy":
+ body["delivery"] = dict(method=self.method,
+ proxyHost=self.proxy["host"],
+ proxyPort=self.proxy["port"],
+ routingType="proxyServer")
+ if "username" in self.proxy.keys():
+ body["delivery"].update({"proxyUserName": self.proxy["username"]})
+ if "password" in self.proxy.keys():
+ body["delivery"].update({"proxyPassword": self.proxy["password"]})
+
+ elif self.routing_type == "script":
+ body["delivery"] = dict(method=self.method,
+ proxyScript=self.proxy["script"],
+ routingType="proxyScript")
+
+ else:
+ body["delivery"] = dict(method="smtp",
+ mailRelayServer=self.email["server"],
+ mailSenderAddress=self.email["sender"],
+ routingType="none")
+
+ # Check whether changes are required.
+ if config["delivery"]["method"] != body["delivery"]["method"]:
+ update = True
+ elif config["delivery"]["method"] in ["https", "http"]:
+ if config["delivery"]["routingType"] != body["delivery"]["routingType"]:
+ update = True
+ elif config["delivery"]["routingType"] == "proxyServer":
+ if (config["delivery"]["proxyHost"] != body["delivery"]["proxyHost"] or
+ config["delivery"]["proxyPort"] != body["delivery"]["proxyPort"] or
+ config["delivery"]["proxyUserName"] != body["delivery"]["proxyUserName"] or
+ config["delivery"]["proxyPassword"] != body["delivery"]["proxyPassword"]):
+ update = True
+ elif config["delivery"]["routingType"] == "proxyScript":
+ if config["delivery"]["proxyScript"] != body["delivery"]["proxyScript"]:
+ update = True
+ elif (config["delivery"]["method"] == "smtp" and
+ config["delivery"]["mailRelayServer"] != body["delivery"]["mailRelayServer"] and
+ config["delivery"]["mailSenderAddress"] != body["delivery"]["mailSenderAddress"]):
+ update = True
+
+ if self.in_maintenance_mode():
+ update = True
+
+ elif self.state == "disabled":
+ if config["asupEnabled"]: # Disable asupEnable is asup is disabled.
+ body = dict(asupEnabled=False)
+ update = True
+
+ else:
+ if not config["asupEnabled"]:
+ self.module.fail_json(msg="AutoSupport must be enabled before enabling or disabling maintenance mode. Array [%s]." % self.ssid)
+
+ if self.in_maintenance_mode() or self.state == "maintenance_enabled":
+ update = True
+
+ # Apply required changes.
+ if update and not self.check_mode:
+ if self.state == "maintenance_enabled":
+ try:
+ rc, response = self.request(self.url_path_prefix + "device-asup/maintenance-window", method="POST",
+ data=dict(maintenanceWindowEnabled=True,
+ duration=self.maintenance_duration,
+ emailAddresses=self.maintenance_emails))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to enabled ASUP maintenance window. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Add maintenance information to the key-value store
+ try:
+ rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_email_list", method="POST",
+ data=",".join(self.maintenance_emails))
+ rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_stop_time", method="POST",
+ data=str(time.time() + 60 * 60 * self.maintenance_duration))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to store maintenance information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ elif self.state == "maintenance_disabled":
+ try:
+ rc, response = self.request(self.url_path_prefix + "device-asup/maintenance-window", method="POST",
+ data=dict(maintenanceWindowEnabled=False,
+ emailAddresses=self.maintenance_emails))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to disable ASUP maintenance window. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Remove maintenance information from the key-value store
+ try:
+ rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_email_list", method="DELETE")
+ rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_stop_time", method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to store maintenance information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ else:
+ if body["asupEnabled"] and self.validate:
+ validate_body = dict(delivery=body["delivery"])
+ if self.email:
+ validate_body["mailReplyAddress"] = self.email["test_recipient"]
+
+ try:
+ rc, response = self.request(self.url_path_prefix + "device-asup/verify-config", timeout=600, method="POST", data=validate_body)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to validate ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ try:
+ rc, response = self.request(self.url_path_prefix + "device-asup", method="POST", data=body)
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="Failed to change ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ return update
+
+ def apply(self):
+ update = self.update_configuration()
+ cfg = self.get_configuration()
+
+ if update:
+ self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, asup=cfg["asupEnabled"], active=cfg["onDemandEnabled"], cfg=cfg)
+ else:
+ self.module.exit_json(msg="No ASUP changes required.", changed=update, asup=cfg["asupEnabled"], active=cfg["onDemandEnabled"], cfg=cfg)
+
+
+def main():
+ asup = NetAppESeriesAsup()
+ asup.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py
new file mode 100644
index 000000000..03a533fe2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_auditlog
+short_description: NetApp E-Series manage audit-log configuration
+description:
+ - This module allows an E-Series storage system owner to set audit-log configuration parameters.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ max_records:
+ description:
+ - The maximum number of log messages the audit log will retain.
+ - Max records must be between 100 and 50000, inclusive.
+ type: int
+ default: 50000
+ log_level:
+ description: Filters the log messages according to the specified log level selection.
+ choices:
+ - all
+ - writeOnly
+ type: str
+ default: writeOnly
+ full_policy:
+ description: Specifies what audit-log should do once the number of entries approaches the record limit.
+ choices:
+ - overWrite
+ - preventSystemAccess
+ type: str
+ default: overWrite
+ threshold:
+ description:
+ - This is the memory-full percentage threshold at which the audit log starts issuing warning messages.
+ - The percentage must be between 60 and 90, inclusive.
+ type: int
+ default: 90
+ force:
+ description:
+ - Forces the audit-log configuration to delete the log history when the log-message fullness would immediately
+ cause a warning or full condition.
+ - Warning! This will cause any existing audit-log messages to be deleted.
+ - This is only applicable for I(full_policy=preventSystemAccess).
+ type: bool
+ default: no
+notes:
+ - Check mode is supported.
+ - Use I(ssid=="0") or I(ssid=="proxy") to configure SANtricity Web Services Proxy auditlog settings otherwise.
+"""
+
+EXAMPLES = """
+- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity.
+ na_santricity_auditlog:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+import json
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesAuditLog(NetAppESeriesModule):
+ """Audit-log module configuration class."""
+ MAX_RECORDS = 50000
+
+ def __init__(self):
+ ansible_options = dict(max_records=dict(type="int", default=50000),
+ log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]),
+ full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]),
+ threshold=dict(type="int", default=90),
+ force=dict(type="bool", default=False))
+ super(NetAppESeriesAuditLog, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.log_level = args["log_level"]
+ self.force = args["force"]
+ self.full_policy = args["full_policy"]
+ self.max_records = args["max_records"]
+ self.threshold = args["threshold"]
+
+ if self.max_records < 100 or self.max_records > self.MAX_RECORDS:
+ self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]" % self.max_records)
+
+ if self.threshold < 60 or self.threshold > 90:
+ self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold)
+
+ # Append web services proxy forward end point.
+ self.url_path_prefix = ""
+ if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
+ def get_configuration(self):
+ """Retrieve the existing audit-log configurations.
+
+ :returns: dictionary containing current audit-log configuration
+ """
+ try:
+ if self.is_proxy() and (self.ssid == "0" or self.ssid.lower() == "proxy"):
+ rc, data = self.request("audit-log/config")
+ else:
+ rc, data = self.request(self.url_path_prefix + "storage-systems/1/audit-log/config")
+ return data
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def build_configuration(self):
+ """Build audit-log expected configuration.
+
+ :returns: Tuple containing update boolean value and dictionary of audit-log configuration
+ """
+ config = self.get_configuration()
+
+ current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
+ auditLogLevel=config["auditLogLevel"],
+ auditLogFullPolicy=config["auditLogFullPolicy"],
+ auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])
+
+ body = dict(auditLogMaxRecords=self.max_records,
+ auditLogLevel=self.log_level,
+ auditLogFullPolicy=self.full_policy,
+ auditLogWarningThresholdPct=self.threshold)
+
+ update = current != body
+ return update, body
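+
+ # Illustrative example (not part of the module): with the option defaults the body above is
+ # {"auditLogMaxRecords": 50000, "auditLogLevel": "writeOnly",
+ # "auditLogFullPolicy": "overWrite", "auditLogWarningThresholdPct": 90}
+ # and update is True only when this differs from the current configuration.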
+
+ def delete_log_messages(self):
+ """Delete all audit-log messages."""
+ try:
+ if self.is_proxy() and (self.ssid == "0" or self.ssid.lower() == "proxy"):
+ rc, result = self.request("audit-log?clearAll=True", method="DELETE")
+ else:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/1/audit-log?clearAll=True", method="DELETE")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update_configuration(self, update=None, body=None, attempt_recovery=True):
+ """Update audit-log configuration."""
+ if update is None or body is None:
+ update, body = self.build_configuration()
+
+ if update and not self.module.check_mode:
+ try:
+ if self.is_proxy() and (self.ssid == "0" or self.ssid.lower() == "proxy"):
+ rc, result = self.request("audit-log/config", data=json.dumps(body), method='POST', ignore_errors=True)
+ else:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/1/audit-log/config",
+ data=json.dumps(body), method='POST', ignore_errors=True)
+
+ if rc == 422:
+ if self.force and attempt_recovery:
+ self.delete_log_messages()
+ update = self.update_configuration(update, body, False)
+ else:
+ self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(rc, result)))
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ return update
+
+ def update(self):
+ """Update the audit-log configuration."""
+ update = self.update_configuration()
+ if update:
+ self.module.exit_json(msg="Audit-log update complete", changed=update)
+ else:
+ self.module.exit_json(msg="No audit-log changes required", changed=update)
+
+
+def main():
+ auditlog = NetAppESeriesAuditLog()
+ auditlog.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py
new file mode 100644
index 000000000..62e6d1da6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_auth
+short_description: NetApp E-Series set or update the password for a storage array device or SANtricity Web Services Proxy.
+description:
+ - Sets or updates the password for a storage array device or SANtricity Web Services Proxy.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ current_admin_password:
+ description:
+ - The current admin password.
+ - When making changes to the embedded web services' login passwords, api_password will be used and current_admin_password will be ignored.
+ - When making changes to the proxy web services' login passwords, api_password will be used and current_admin_password will be ignored.
+ - Only required when the password has been set and will be ignored if not set.
+ type: str
+ required: false
+ password:
+ description:
+ - The password you would like to set.
+ - Cannot be more than 30 characters.
+ type: str
+ required: false
+ user:
+ description:
+ - The local user account password to update
+ - For systems prior to E2800, use admin to change the RW (system) password.
+ - For systems prior to E2800, all choices except admin will be ignored.
+ type: str
+ choices: ["admin", "monitor", "support", "security", "storage"]
+ default: "admin"
+ required: false
+ minimum_password_length:
+ description:
+ - This option defines the minimum password length.
+ type: int
+ required: false
+notes:
+ - Set I(ssid=="0") or I(ssid=="proxy") when attempting to change the password for SANtricity Web Services Proxy.
+ - SANtricity Web Services Proxy storage password will be updated when changing the password on a managed storage system from the proxy; This is only true
+ when the storage system has been previously contacted.
+"""
+
+EXAMPLES = """
+- name: Set the initial password
+ na_santricity_auth:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ validate_certs: true
+ current_admin_password: currentadminpass
+ password: newpassword123
+ user: admin
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: "Password Updated Successfully"
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from time import sleep
+
+
+class NetAppESeriesAuth(NetAppESeriesModule):
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(current_admin_password=dict(type="str", required=False, no_log=True),
+ password=dict(type="str", required=False, no_log=True),
+ user=dict(type="str", choices=["admin", "monitor", "support", "security", "storage"], default="admin", required=False),
+ minimum_password_length=dict(type="int", required=False, no_log=True))
+
+ super(NetAppESeriesAuth, self).__init__(ansible_options=ansible_options, web_services_version=version, supports_check_mode=True)
+ args = self.module.params
+ self.current_admin_password = args["current_admin_password"]
+ self.password = args["password"]
+ self.user = args["user"]
+ self.minimum_password_length = args["minimum_password_length"]
+
+ self.DEFAULT_HEADERS.update({"x-netapp-password-validate-method": "none"})
+
+ self.is_admin_password_set = None
+ self.current_password_length_requirement = None
+
+ def minimum_password_length_change_required(self):
+ """Retrieve the current storage array's global configuration."""
+ change_required = False
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ rc, system_info = self.request("local-users/info", force_basic_auth=False)
+
+ elif self.is_embedded_available():
+ rc, system_info = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/info" % self.ssid,
+ force_basic_auth=False)
+ else:
+ return False # legacy systems without embedded web services.
+ else:
+ rc, system_info = self.request("storage-systems/%s/local-users/info" % self.ssid, force_basic_auth=False)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine minimum password length. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.is_admin_password_set = system_info["adminPasswordSet"]
+ if self.minimum_password_length is not None and self.minimum_password_length != system_info["minimumPasswordLength"]:
+ change_required = True
+
+ if (self.password is not None and ((change_required and self.minimum_password_length > len(self.password)) or
+ (not change_required and system_info["minimumPasswordLength"] > len(self.password)))):
+ self.module.fail_json(msg="Password does not meet the length requirement [%s]. Array Id [%s]." % (system_info["minimumPasswordLength"], self.ssid))
+
+ return change_required
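+
+ # Illustrative example (not part of the module): if the array reports
+ # {"minimumPasswordLength": 8, "adminPasswordSet": true} and minimum_password_length=12 is
+ # requested, a change is required; a supplied password shorter than the effective minimum
+ # fails fast in the length check above.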
+
+ def update_minimum_password_length(self):
+ """Update automatic load balancing state."""
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ try:
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = "admin"
+ rc, minimum_password_length = self.request("local-users/password-length", method="POST",
+ data={"minimumPasswordLength": self.minimum_password_length})
+ except Exception as error:
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = ""
+ rc, minimum_password_length = self.request("local-users/password-length", method="POST",
+ data={"minimumPasswordLength": self.minimum_password_length})
+ elif self.is_embedded_available():
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = ""
+ rc, minimum_password_length = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/password-length" % self.ssid,
+ method="POST", data={"minimumPasswordLength": self.minimum_password_length})
+ else:
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = ""
+ rc, minimum_password_length = self.request("storage-systems/%s/local-users/password-length" % self.ssid, method="POST",
+ data={"minimumPasswordLength": self.minimum_password_length})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set minimum password length. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def logout_system(self):
+ """Ensure system is logged out. This is required because login test will always succeed if previously logged in."""
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ rc, system_info = self.request("utils/login", rest_api_path=self.DEFAULT_BASE_PATH, method="DELETE", force_basic_auth=False)
+ elif self.is_embedded_available():
+ rc, system_info = self.request("storage-systems/%s/forward/devmgr/utils/login" % self.ssid, method="DELETE", force_basic_auth=False)
+ else:
+ # Nothing to do for legacy systems without embedded web services.
+ pass
+ else:
+ rc, system_info = self.request("utils/login", rest_api_path=self.DEFAULT_BASE_PATH, method="DELETE", force_basic_auth=False)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to log out of storage system [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def password_change_required(self):
+ """Verify whether the current password is expected array password. Works only against embedded systems."""
+ if self.password is None:
+ return False
+
+ change_required = False
+ system_info = None
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ rc, system_info = self.request("local-users/info", force_basic_auth=False)
+ elif self.is_embedded_available():
+ rc, system_info = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/info" % self.ssid,
+ force_basic_auth=False)
+ else:
+ rc, response = self.request("storage-systems/%s/passwords" % self.ssid, ignore_errors=True)
+ system_info = {"minimumPasswordLength": 0, "adminPasswordSet": response["adminPasswordSet"]}
+ else:
+ rc, system_info = self.request("storage-systems/%s/local-users/info" % self.ssid, force_basic_auth=False)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve information about storage system [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.is_admin_password_set = system_info["adminPasswordSet"]
+
+ if not self.is_admin_password_set:
+ if self.user == "admin" and self.password != "":
+ change_required = True
+
+ # Determine whether user's password needs to be changed
+ else:
+ utils_login_used = False
+ self.logout_system() # This ensures that login test functions correctly. The query onlycheck=true does not work.
+
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ utils_login_used = True
+ rc, response = self.request("utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" % (self.user, self.password),
+ rest_api_path=self.DEFAULT_BASE_PATH, log_request=False, ignore_errors=True, force_basic_auth=False)
+ # elif self.is_embedded_available():
+ # utils_login_used = True
+ # rc, response = self.request("storage-systems/%s/forward/devmgr/utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false"
+ # % (self.ssid, self.user, self.password), log_request=False, ignore_errors=True, force_basic_auth=False)
+ else:
+ if self.user == "admin":
+ rc, response = self.request("storage-systems/%s/stored-password/validate" % self.ssid, method="POST", log_request=False,
+ ignore_errors=True, data={"password": self.password})
+ if rc == 200:
+ change_required = not response["isValidPassword"]
+ elif rc == 404: # endpoint did not exist, old proxy version
+ if self.is_web_services_version_met("04.10.0000.0000"):
+ self.module.fail_json(msg="For platforms before E2800 use SANtricity Web Services Proxy 4.1 or later! Array Id [%s].")
+ self.module.fail_json(msg="Failed to validate stored password! Array Id [%s].")
+ else:
+ self.module.fail_json(msg="Failed to validate stored password! Array Id [%s]." % self.ssid)
+ else:
+ self.module.fail_json(msg="Role based login not available! Only storage system password can be set for storage systems prior to E2800."
+ " Array Id [%s]." % self.ssid)
+ else:
+ utils_login_used = True
+ rc, response = self.request("utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" % (self.user, self.password),
+ rest_api_path=self.DEFAULT_BASE_PATH, log_request=False, ignore_errors=True, force_basic_auth=False)
+
+ # Check return codes to determine whether a change is required
+ if utils_login_used:
+ if rc == 401:
+ change_required = True
+ elif rc == 422:
+ self.module.fail_json(msg="SAML enabled! SAML disables default role based login. Array [%s]" % self.ssid)
+
+ return change_required
+
+ def set_array_admin_password(self):
+ """Set the array's admin password."""
+ if self.is_proxy():
+
+ # Update proxy's local users
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ self.creds["url_password"] = "admin"
+ try:
+ body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}}
+ rc, proxy = self.request("local-users", method="POST", data=body)
+ except Exception as error:
+ self.creds["url_password"] = ""
+ try:
+ body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}}
+ rc, proxy = self.request("local-users", method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set proxy's admin password. Error [%s]." % to_native(error))
+
+ self.creds["url_password"] = self.password
+
+ # Update the password using the passwords endpoint; this also updates the stored password
+ else:
+ try:
+ body = {"currentAdminPassword": "", "newPassword": self.password, "adminPassword": True}
+ rc, storage_system = self.request("storage-systems/%s/passwords" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set storage system's admin password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Update embedded local users
+ else:
+ self.creds["url_password"] = ""
+ try:
+ body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}}
+ rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded storage system's admin password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ self.creds["url_password"] = self.password
+
+ def set_array_password(self):
+ """Set the array password."""
+ if not self.is_admin_password_set:
+ self.module.fail_json(msg="Admin password not set! Set admin password before changing non-admin user passwords. Array [%s]." % self.ssid)
+
+ if self.is_proxy():
+
+ # Update proxy's local users
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ try:
+ body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}}
+ rc, proxy = self.request("local-users", method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set proxy password. Error [%s]." % to_native(error))
+
+ # Update embedded admin password via proxy passwords endpoint to include updating proxy/unified manager
+ elif self.user == "admin":
+ try:
+ body = {"adminPassword": True, "currentAdminPassword": self.current_admin_password, "newPassword": self.password}
+ rc, proxy = self.request("storage-systems/%s/passwords" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Update embedded non-admin passwords via proxy forward endpoint.
+ elif self.is_embedded_available():
+ try:
+ body = {"currentAdminPassword": self.current_admin_password, "updates": {"userName": self.user, "newPassword": self.password}}
+ rc, proxy = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Update embedded local users
+ else:
+ try:
+ body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}}
+ rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def apply(self):
+ """Apply any required changes."""
+ password_change_required = self.password_change_required()
+ minimum_password_length_change_required = self.minimum_password_length_change_required()
+ change_required = password_change_required or minimum_password_length_change_required
+
+ if change_required and not self.module.check_mode:
+ if minimum_password_length_change_required:
+ self.update_minimum_password_length()
+
+ if password_change_required:
+ if not self.is_admin_password_set:
+ self.set_array_admin_password()
+ else:
+ self.set_array_password()
+
+ if password_change_required and minimum_password_length_change_required:
+ self.module.exit_json(msg="'%s' password and required password length has been changed. Array [%s]."
+ % (self.user, self.ssid), changed=change_required)
+ elif password_change_required:
+ self.module.exit_json(msg="'%s' password has been changed. Array [%s]." % (self.user, self.ssid), changed=change_required)
+ elif minimum_password_length_change_required:
+ self.module.exit_json(msg="Required password length has been changed. Array [%s]." % self.ssid, changed=change_required)
+ self.module.exit_json(msg="No changes have been made. Array [%s]." % self.ssid, changed=change_required)
+
+
+def main():
+ auth = NetAppESeriesAuth()
+ auth.apply()
+
+
+if __name__ == "__main__":
+ main()
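Note on the credential handling above: update_minimum_password_length() and set_array_admin_password() retry the request with the factory-default proxy password and then with a blank password when no admin password has been set yet. Below is a minimal sketch of that fallback pattern only; send_request() is a hypothetical stand-in for NetAppESeriesModule.request() and is not part of the module.

# Sketch only: credential fallback used when no admin password is set yet.
# send_request() is a hypothetical stand-in for NetAppESeriesModule.request().
def send_request(password):
    if password != "":
        raise RuntimeError("401 Unauthorized")   # pretend only a blank password works
    return 200

def set_with_fallback():
    try:
        return send_request("admin")   # factory-default proxy password
    except Exception:
        return send_request("")        # newly deployed systems accept a blank password

print(set_with_fallback())             # -> 200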
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py
new file mode 100644
index 000000000..e7fe8eda7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_santricity_client_certificate
+short_description: NetApp E-Series manage remote server certificates.
+description: Manage NetApp E-Series storage array's remote server certificates.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ certificates:
+ description:
+ - List of certificate files
+ - Each item must include the path to the file
+ type: list
+ required: false
+ remove_unspecified_user_certificates:
+ description:
+ - Whether to remove user-installed client certificates that are not specified in I(certificates).
+ type: bool
+ default: false
+ required: false
+ reload_certificates:
+ description:
+ - Whether to reload certificates when certificates have been added or removed.
+ - Certificates will not be available or removed until the servers have been reloaded.
+ type: bool
+ default: true
+ required: false
+notes:
+ - Set I(ssid=="0") or I(ssid=="proxy") to specifically reference SANtricity Web Services Proxy.
+requirements:
+ - cryptography
+"""
+EXAMPLES = """
+- name: Upload certificates
+ na_santricity_client_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ certificates: ["/path/to/certificates.crt", "/path/to/another_certificate.crt"]
+- name: Remove all certificates
+ na_santricity_client_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+"""
+RETURN = """
+changed:
+ description: Whether changes have been made.
+ type: bool
+ returned: always
+ sample: true
+add_certificates:
+ description: Any SSL certificates that were added.
+ type: list
+ returned: always
+ sample: ["added_cerificiate.crt"]
+removed_certificates:
+ description: Any SSL certificates that were removed.
+ type: list
+ returned: always
+ sample: ["removed_cerificiate.crt"]
+"""
+
+import binascii
+import os
+import re
+from time import sleep
+
+from datetime import datetime
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native
+
+try:
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+else:
+ HAS_CRYPTOGRAPHY = True
+
+
+class NetAppESeriesClientCertificate(NetAppESeriesModule):
+ RELOAD_TIMEOUT_SEC = 3 * 60
+
+ def __init__(self):
+ ansible_options = dict(certificates=dict(type="list", required=False),
+ remove_unspecified_user_certificates=dict(type="bool", default=False, required=False),
+ reload_certificates=dict(type="bool", default=True, required=False))
+
+ super(NetAppESeriesClientCertificate, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.certificates = args["certificates"] if args["certificates"] else []
+ self.remove_unspecified_user_certificates = args["remove_unspecified_user_certificates"]
+ self.apply_reload_certificates = args["reload_certificates"]
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if self.is_proxy() and self.ssid != "0" and self.ssid.lower() != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
+ self.remove_certificates = list()
+ self.add_certificates = list()
+ self.certificate_fingerprint_cache = None
+ self.certificate_info_cache = None
+
+ def certificate_info(self, path):
+ """Determine the pertinent certificate information: alias, subjectDN, issuerDN, start and expire.
+
+ Note: Use only when certificate/remote-server endpoints do not exist. Used to identify certificates through
+ the sslconfig/ca endpoint.
+ """
+ certificate = None
+ with open(path, "rb") as fh:
+ data = fh.read()
+ try:
+ certificate = x509.load_pem_x509_certificate(data, default_backend())
+ except Exception as error:
+ try:
+ certificate = x509.load_der_x509_certificate(data, default_backend())
+ except Exception as error:
+ self.module.fail_json(msg="Failed to load certificate. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ if not isinstance(certificate, x509.Certificate):
+ self.module.fail_json(msg="Failed to open certificate file or invalid certificate object type. Array [%s]." % self.ssid)
+
+ return dict(start_date=certificate.not_valid_before,
+ expire_date=certificate.not_valid_after,
+ subject_dn=[attr.value for attr in certificate.subject],
+ issuer_dn=[attr.value for attr in certificate.issuer])
+
+ def certificate_fingerprint(self, path):
+ """Load x509 certificate that is either encoded DER or PEM encoding and return the certificate fingerprint."""
+ certificate = None
+ with open(path, "rb") as fh:
+ data = fh.read()
+ try:
+ certificate = x509.load_pem_x509_certificate(data, default_backend())
+ except Exception as error:
+ try:
+ certificate = x509.load_der_x509_certificate(data, default_backend())
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine certificate fingerprint. File [%s]. Array [%s]. Error [%s]."
+ % (path, self.ssid, to_native(error)))
+
+ return binascii.hexlify(certificate.fingerprint(certificate.signature_hash_algorithm)).decode("utf-8")
+
+ def determine_changes(self):
+ """Search for remote server certificate that goes by the alias or has a matching fingerprint."""
+ rc, current_certificates = self.request(self.url_path_prefix + "certificates/remote-server", ignore_errors=True)
+
+ if rc == 404: # system down or endpoint does not exist
+ rc, current_certificates = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", ignore_errors=True)
+
+ if rc > 299:
+ self.module.fail_json(msg="Failed to retrieve remote server certificates. Array [%s]." % self.ssid)
+
+ user_installed_certificates = [certificate for certificate in current_certificates if certificate["isUserInstalled"]]
+ existing_certificates = []
+
+ for path in self.certificates:
+ for current_certificate in user_installed_certificates:
+ info = self.certificate_info(path)
+ tmp = dict(subject_dn=[re.sub(r".*=", "", item) for item in current_certificate["subjectDN"].split(", ")],
+ issuer_dn=[re.sub(r".*=", "", item) for item in current_certificate["issuerDN"].split(", ")],
+ start_date=datetime.strptime(current_certificate["start"].split(".")[0], "%Y-%m-%dT%H:%M:%S"),
+ expire_date=datetime.strptime(current_certificate["expire"].split(".")[0], "%Y-%m-%dT%H:%M:%S"))
+ if (all([attr in info["subject_dn"] for attr in tmp["subject_dn"]]) and
+ all([attr in info["issuer_dn"] for attr in tmp["issuer_dn"]]) and
+ tmp["start_date"] == info["start_date"] and
+ tmp["expire_date"] == info["expire_date"]):
+ existing_certificates.append(current_certificate)
+ break
+ else:
+ self.add_certificates.append(path)
+ if self.remove_unspecified_user_certificates:
+ self.remove_certificates = [certificate for certificate in user_installed_certificates if certificate not in existing_certificates]
+
+ elif rc > 299:
+ self.module.fail_json(msg="Failed to retrieve remote server certificates. Array [%s]." % self.ssid)
+
+ else:
+ user_installed_certificates = [certificate for certificate in current_certificates if certificate["isUserInstalled"]]
+ existing_certificates = []
+ for path in self.certificates:
+ fingerprint = self.certificate_fingerprint(path)
+ for current_certificate in user_installed_certificates:
+ if current_certificate["sha256Fingerprint"] == fingerprint or current_certificate["shaFingerprint"] == fingerprint:
+ existing_certificates.append(current_certificate)
+ break
+ else:
+ self.add_certificates.append(path)
+ if self.remove_unspecified_user_certificates:
+ self.remove_certificates = [certificate for certificate in user_installed_certificates if certificate not in existing_certificates]
+
+ def upload_certificate(self, path):
+ """Add or update remote server certificate to the storage array."""
+ file_name = os.path.basename(path)
+ headers, data = create_multipart_formdata(files=[("file", file_name, path)])
+
+ rc, resp = self.request(self.url_path_prefix + "certificates/remote-server", method="POST", headers=headers, data=data, ignore_errors=True)
+ if rc == 404:
+ rc, resp = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", method="POST", headers=headers, data=data, ignore_errors=True)
+
+ if rc > 299:
+ self.module.fail_json(msg="Failed to upload certificate. Array [%s]. Error [%s, %s]." % (self.ssid, rc, resp))
+
+ def delete_certificate(self, info):
+ """Delete existing remote server certificate in the storage array truststore."""
+ rc, resp = self.request(self.url_path_prefix + "certificates/remote-server/%s" % info["alias"], method="DELETE", ignore_errors=True)
+ if rc == 404:
+ rc, resp = self.request(self.url_path_prefix + "sslconfig/ca/%s?useTruststore=true" % info["alias"], method="DELETE", ignore_errors=True)
+
+ if rc > 204:
+ self.module.fail_json(msg="Failed to delete certificate. Alias [%s]. Array [%s]. Error [%s, %s]." % (info["alias"], self.ssid, rc, resp))
+
+ def reload_certificates(self):
+ """Reload certificates on both controllers."""
+ rc, resp = self.request(self.url_path_prefix + "certificates/reload?reloadBoth=true", method="POST", ignore_errors=True)
+ if rc == 404:
+ rc, resp = self.request(self.url_path_prefix + "sslconfig/reload?reloadBoth=true", method="POST", ignore_errors=True)
+
+ if rc > 202:
+ self.module.fail_json(msg="Failed to initiate certificate reload on both controllers! Array [%s]." % self.ssid)
+
+ # Wait for controller to be online again.
+ for retry in range(int(self.RELOAD_TIMEOUT_SEC / 3)):
+ rc, current_certificates = self.request(self.url_path_prefix + "certificates/remote-server", ignore_errors=True)
+
+ if rc == 404: # system down or endpoint does not exist
+ rc, current_certificates = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", ignore_errors=True)
+
+ if rc < 300:
+ break
+ sleep(3)
+ else:
+ self.module.fail_json(msg="Failed to retrieve server certificates. Array [%s]." % self.ssid)
+
+ def apply(self):
+ """Apply state changes to the storage array's truststore."""
+ changed = False
+
+ self.determine_changes()
+ if self.remove_certificates or self.add_certificates:
+ changed = True
+
+ if changed and not self.module.check_mode:
+ for info in self.remove_certificates:
+ self.delete_certificate(info)
+
+ for path in self.add_certificates:
+ self.upload_certificate(path)
+
+ if self.apply_reload_certificates:
+ self.reload_certificates()
+
+ self.module.exit_json(changed=changed, removed_certificates=self.remove_certificates, add_certificates=self.add_certificates)
+
+
+def main():
+ client_certs = NetAppESeriesClientCertificate()
+ client_certs.apply()
+
+
+if __name__ == "__main__":
+ main()
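The certificate_fingerprint() method above identifies an installed certificate by hashing its DER encoding with the certificate's own signature hash algorithm. Below is a minimal sketch of the same computation using the cryptography package; "client.crt" is a hypothetical path and the snippet is not part of the collection.

# Sketch only: fingerprint a certificate the same way the module does.
import binascii

from cryptography import x509
from cryptography.hazmat.backends import default_backend

def fingerprint_hex(path):
    with open(path, "rb") as handle:
        data = handle.read()
    try:
        certificate = x509.load_pem_x509_certificate(data, default_backend())
    except ValueError:
        certificate = x509.load_der_x509_certificate(data, default_backend())
    # Hash the DER encoding with the certificate's own signature hash algorithm.
    return binascii.hexlify(certificate.fingerprint(certificate.signature_hash_algorithm)).decode("utf-8")

print(fingerprint_hex("client.crt"))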
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py
new file mode 100644
index 000000000..c283c3d46
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_discover
+short_description: NetApp E-Series discover E-Series storage systems
+description: Module searches a subnet range and returns any available E-Series storage systems.
+author: Nathan Swartz (@ndswartz)
+options:
+ subnet_mask:
+ description:
+ - This is the IPv4 search range for discovering E-Series storage arrays.
+ - IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
+ - Be sure to include all management paths in the search range.
+ type: str
+ required: true
+ ports:
+ description:
+ - This option specifies which ports to be tested during the discovery process.
+ - The first usable port will be used in the returned API url.
+ type: list
+ default: [8443]
+ required: false
+ proxy_url:
+ description:
+ - Web Services Proxy REST API URL. Example https://192.168.1.100:8443/devmgr/v2/
+ type: str
+ required: false
+ proxy_username:
+ description:
+ - Web Service Proxy username
+ type: str
+ required: false
+ proxy_password:
+ description:
+ - Web Service Proxy user password
+ type: str
+ required: false
+ proxy_validate_certs:
+ description:
+ - Whether to validate Web Service Proxy SSL certificate
+ type: bool
+ default: true
+ required: false
+ prefer_embedded:
+ description:
+ - Give preference to Web Services Embedded when an option exists for both Web Services Proxy and Embedded.
+ - Web Services Proxy will be utilized when available by default.
+ type: bool
+ default: false
+ required: false
+notes:
+ - Only available for platforms E2800 or later (SANtricity Web Services Embedded REST API must be available).
+ - All E-Series storage systems with SANtricity version 11.62 or later will be discovered.
+ - Only E-Series storage systems without a set admin password running SANtricity versions prior to 11.62 will be discovered.
+ - Use SANtricity Web Services Proxy to discover all systems regardless of SANtricity version or password.
+requirements:
+ - ipaddress
+"""
+
+EXAMPLES = """
+- name: Discover all E-Series storage systems on the network.
+ na_santricity_discover:
+ subnet_mask: 192.168.1.0/24
+"""
+
+RETURN = """
+systems_found:
+ description: Success message
+ returned: on success
+ type: dict
+ sample: '{"012341234123": {
+ "addresses": ["192.168.1.184", "192.168.1.185"],
+ "api_urls": ["https://192.168.1.184:8443/devmgr/v2/", "https://192.168.1.185:8443/devmgr/v2/"],
+ "label": "ExampleArray01",
+ "proxy_ssid: "",
+ "proxy_required": false},
+ "012341234567": {
+ "addresses": ["192.168.1.23", "192.168.1.24"],
+ "api_urls": ["https://192.168.1.100:8443/devmgr/v2/"],
+ "label": "ExampleArray02",
+ "proxy_ssid": "array_ssid",
+ "proxy_required": true}}'
+"""
+
+import json
+import multiprocessing
+import threading
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import request
+from ansible.module_utils._text import to_native
+
+try:
+ import ipaddress
+except ImportError:
+ HAS_IPADDRESS = False
+else:
+ HAS_IPADDRESS = True
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+
+class NetAppESeriesDiscover:
+ """Discover E-Series storage systems."""
+ MAX_THREAD_POOL_SIZE = 256
+ CPU_THREAD_MULTIPLE = 32
+ SEARCH_TIMEOUT = 30
+ DEFAULT_CONNECTION_TIMEOUT_SEC = 30
+ DEFAULT_DISCOVERY_TIMEOUT_SEC = 300
+
+ def __init__(self):
+ ansible_options = dict(subnet_mask=dict(type="str", required=True),
+ ports=dict(type="list", required=False, default=[8443]),
+ proxy_url=dict(type="str", required=False),
+ proxy_username=dict(type="str", required=False),
+ proxy_password=dict(type="str", required=False, no_log=True),
+ proxy_validate_certs=dict(type="bool", default=True, required=False),
+ prefer_embedded=dict(type="bool", default=False, required=False))
+
+ required_together = [["proxy_url", "proxy_username", "proxy_password"]]
+ self.module = AnsibleModule(argument_spec=ansible_options, required_together=required_together)
+ args = self.module.params
+
+ self.subnet_mask = args["subnet_mask"]
+ self.prefer_embedded = args["prefer_embedded"]
+ self.ports = []
+ self.proxy_url = args["proxy_url"]
+ if args["proxy_url"]:
+ parsed_url = list(urlparse.urlparse(args["proxy_url"]))
+ parsed_url[2] = "/devmgr/utils/about"
+ self.proxy_about_url = urlparse.urlunparse(parsed_url)
+ parsed_url[2] = "/devmgr/v2/"
+ self.proxy_url = urlparse.urlunparse(parsed_url)
+ self.proxy_username = args["proxy_username"]
+ self.proxy_password = args["proxy_password"]
+ self.proxy_validate_certs = args["proxy_validate_certs"]
+
+ for port in args["ports"]:
+ if str(port).isdigit() and 0 < port < 2 ** 16:
+ self.ports.append(str(port))
+ else:
+ self.module.fail_json(msg="Invalid port! Ports must be positive numbers between 0 and 65536.")
+
+ self.systems_found = {}
+
+ def check_ip_address(self, systems_found, address):
+ """Determine where an E-Series storage system is available at a specific ip address."""
+ for port in self.ports:
+ if port == "8080":
+ url = "http://%s:%s/" % (address, port)
+ else:
+ url = "https://%s:%s/" % (address, port)
+
+ try:
+ rc, about = request(url + "devmgr/v2/storage-systems/1/about", validate_certs=False, force_basic_auth=False, ignore_errors=True)
+ if about["serialNumber"] in systems_found:
+ systems_found[about["serialNumber"]]["api_urls"].append(url)
+ else:
+ systems_found.update({about["serialNumber"]: {"api_urls": [url], "label": about["name"],
+ "addresses": [], "proxy_ssid": "", "proxy_required": False}})
+ break
+ except Exception as error:
+ try:
+ rc, sa_data = request(url + "devmgr/v2/storage-systems/1/symbol/getSAData", validate_certs=False, force_basic_auth=False,
+ ignore_errors=True)
+ if rc == 401: # Unauthorized
+ self.module.warn(
+ "Fail over and discover any storage system without a set admin password. This will discover systems without a set password"
+ " such as newly deployed storage systems. Address [%s]." % address)
+ # Fail over and discover any storage system without a set admin password. This will cover newly deployed systems.
+ rc, graph = request(url + "graph", validate_certs=False, url_username="admin", url_password="", timeout=self.SEARCH_TIMEOUT)
+ sa_data = graph["sa"]["saData"]
+
+ if sa_data["chassisSerialNumber"] in systems_found:
+ systems_found[sa_data["chassisSerialNumber"]]["api_urls"].append(url)
+ else:
+ systems_found.update({sa_data["chassisSerialNumber"]: {"api_urls": [url], "label": sa_data["storageArrayLabel"],
+ "addresses": [], "proxy_ssid": "", "proxy_required": False}})
+ break
+ except Exception as error:
+ pass
+
+ def no_proxy_discover(self):
+ """Discover E-Series storage systems using embedded web services."""
+ thread_pool_size = min(multiprocessing.cpu_count() * self.CPU_THREAD_MULTIPLE, self.MAX_THREAD_POOL_SIZE)
+ subnet = list(ipaddress.ip_network(u"%s" % self.subnet_mask))
+
+ thread_pool = []
+ search_count = len(subnet)
+ for start in range(0, search_count, thread_pool_size):
+ end = search_count if (search_count - start) < thread_pool_size else start + thread_pool_size
+
+ for address in subnet[start:end]:
+ thread = threading.Thread(target=self.check_ip_address, args=(self.systems_found, address))
+ thread_pool.append(thread)
+ thread.start()
+ for thread in thread_pool:
+ thread.join()
+
+ def verify_proxy_service(self):
+ """Verify proxy url points to a web services proxy."""
+ try:
+ rc, about = request(self.proxy_about_url, validate_certs=self.proxy_validate_certs)
+ if not about["runningAsProxy"]:
+ self.module.fail_json(msg="Web Services is not running as a proxy!")
+ except Exception as error:
+ self.module.fail_json(msg="Proxy is not available! Check proxy_url. Error [%s]." % to_native(error))
+
+ def test_systems_found(self, systems_found, serial, label, addresses):
+ """Verify and build api urls."""
+ api_urls = []
+ for address in addresses:
+ for port in self.ports:
+ if port == "8080":
+ url = "http://%s:%s/devmgr/" % (address, port)
+ else:
+ url = "https://%s:%s/devmgr/" % (address, port)
+
+ try:
+ rc, response = request(url + "utils/about", validate_certs=False, timeout=self.SEARCH_TIMEOUT)
+ api_urls.append(url + "v2/")
+ break
+ except Exception as error:
+ pass
+ systems_found.update({serial: {"api_urls": api_urls,
+ "label": label,
+ "addresses": addresses,
+ "proxy_ssid": "",
+ "proxy_required": False}})
+
+ def proxy_discover(self):
+ """Search for array using it's chassis serial from web services proxy."""
+ self.verify_proxy_service()
+ subnet = ipaddress.ip_network(u"%s" % self.subnet_mask)
+
+ try:
+ rc, request_id = request(self.proxy_url + "discovery", method="POST", validate_certs=self.proxy_validate_certs,
+ force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password,
+ data=json.dumps({"startIP": str(subnet[0]), "endIP": str(subnet[-1]),
+ "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC}))
+
+ # Wait for discover to complete
+ try:
+ for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC):
+ rc, discovered_systems = request(self.proxy_url + "discovery?requestId=%s" % request_id["requestId"],
+ validate_certs=self.proxy_validate_certs,
+ force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
+ if not discovered_systems["discoverProcessRunning"]:
+ thread_pool = []
+ for discovered_system in discovered_systems["storageSystems"]:
+ addresses = []
+ for controller in discovered_system["controllers"]:
+ addresses.extend(controller["ipAddresses"])
+
+ # Storage systems with embedded web services.
+ if "https" in discovered_system["supportedManagementPorts"] and self.prefer_embedded:
+
+ thread = threading.Thread(target=self.test_systems_found,
+ args=(self.systems_found, discovered_system["serialNumber"], discovered_system["label"], addresses))
+ thread_pool.append(thread)
+ thread.start()
+
+ # Storage systems without embedded web services.
+ else:
+ self.systems_found.update({discovered_system["serialNumber"]: {"api_urls": [self.proxy_url],
+ "label": discovered_system["label"],
+ "addresses": addresses,
+ "proxy_ssid": "",
+ "proxy_required": True}})
+ for thread in thread_pool:
+ thread.join()
+ break
+ sleep(1)
+ else:
+ self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error))
+
+ def update_proxy_with_proxy_ssid(self):
+ """Determine the current proxy ssid for all discovered-proxy_required storage systems."""
+ # Discover all added storage systems to the proxy.
+ systems = []
+ try:
+ rc, systems = request(self.proxy_url + "storage-systems", validate_certs=self.proxy_validate_certs,
+ force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to ascertain storage systems added to Web Services Proxy.")
+
+ for system_key, system_info in self.systems_found.items():
+ if self.systems_found[system_key]["proxy_required"]:
+ for system in systems:
+ if system_key == system["chassisSerialNumber"]:
+ self.systems_found[system_key]["proxy_ssid"] = system["id"]
+
+ def discover(self):
+ """Discover E-Series storage systems."""
+ missing_packages = []
+ if not HAS_IPADDRESS:
+ missing_packages.append("ipaddress")
+
+ if missing_packages:
+ self.module.fail_json(msg="Python packages are missing! Packages [%s]." % ", ".join(missing_packages))
+
+ if self.proxy_url:
+ self.proxy_discover()
+ self.update_proxy_with_proxy_ssid()
+ else:
+ self.no_proxy_discover()
+
+ self.module.exit_json(msg="Discover process complete.", systems_found=self.systems_found, changed=False)
+
+
+def main():
+ discover = NetAppESeriesDiscover()
+ discover.discover()
+
+
+if __name__ == "__main__":
+ main()
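The no_proxy_discover() method above sweeps the subnet by spawning one thread per address in batches sized from the CPU count. Below is a minimal sketch of that fan-out pattern only; probe() is a hypothetical stand-in for the module's REST requests and is not part of the collection.

# Sketch only: batched thread fan-out over a subnet, mirroring no_proxy_discover().
import ipaddress
import threading

def probe(results, address):
    results[str(address)] = "checked"   # real module: query https://<address>:8443/devmgr/...

def sweep(cidr, batch_size=128):
    results = {}
    addresses = list(ipaddress.ip_network(u"%s" % cidr))
    for start in range(0, len(addresses), batch_size):
        threads = [threading.Thread(target=probe, args=(results, address))
                   for address in addresses[start:start + batch_size]]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    return results

print(len(sweep("192.168.1.0/24")))     # 256 addresses probed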
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py
new file mode 100644
index 000000000..612ce2bd6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_drive_firmware
+short_description: NetApp E-Series manage drive firmware
+description:
+ - Ensure drive firmware version is activated on specified drive model.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ firmware:
+ description:
+ - List of drive firmware file paths.
+ - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+ type: list
+ required: True
+ wait_for_completion:
+ description:
+ - This flag will cause the module to wait for any upgrade actions to complete.
+ type: bool
+ default: false
+ ignore_inaccessible_drives:
+ description:
+ - This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible.
+ type: bool
+ default: false
+ upgrade_drives_online:
+ description:
+ - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
+ - When I(upgrade_drives_online==False) stop all I/O before running the task.
+ type: bool
+ default: true
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ na_santricity_drive_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware: "path/to/drive_firmware"
+ wait_for_completion: true
+ ignore_inaccessible_drives: false
+"""
+RETURN = """
+msg:
+ description: Whether any drive firmware was upgraded and whether it is in progress.
+ type: str
+ returned: always
+ sample:
+ { changed: True, upgrade_in_process: True }
+"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesDriveFirmware(NetAppESeriesModule):
+ WAIT_TIMEOUT_SEC = 60 * 15
+
+ def __init__(self):
+ ansible_options = dict(
+ firmware=dict(type="list", required=True),
+ wait_for_completion=dict(type="bool", default=False),
+ ignore_inaccessible_drives=dict(type="bool", default=False),
+ upgrade_drives_online=dict(type="bool", default=True))
+
+ super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.firmware_list = args["firmware"]
+ self.wait_for_completion = args["wait_for_completion"]
+ self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
+ self.upgrade_drives_online = args["upgrade_drives_online"]
+
+ self.upgrade_list_cache = None
+
+ self.upgrade_required_cache = None
+ self.upgrade_in_progress = False
+ self.drive_info_cache = None
+
+ def upload_firmware(self):
+ """Ensure firmware has been upload prior to uploaded."""
+ for firmware in self.firmware_list:
+ firmware_name = os.path.basename(firmware)
+ files = [("file", firmware_name, firmware)]
+ headers, data = create_multipart_formdata(files)
+ try:
+ rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))
+
+ def upgrade_list(self):
+ """Determine whether firmware is compatible with the specified drives."""
+ if self.upgrade_list_cache is None:
+ self.upgrade_list_cache = list()
+ try:
+ rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
+
+ # Create upgrade list, this ensures only the firmware uploaded is applied
+ for firmware in self.firmware_list:
+ filename = os.path.basename(firmware)
+
+ for uploaded_firmware in response["compatibilities"]:
+ if uploaded_firmware["filename"] == filename:
+
+ # Determine whether upgrade is required
+ drive_reference_list = []
+ for drive in uploaded_firmware["compatibleDrives"]:
+ try:
+ rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
+
+ # Add drive references that are supported and differ from current firmware
+ if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
+ uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
+
+ if self.ignore_inaccessible_drives or not drive_info["offline"]:
+ drive_reference_list.append(drive["driveRef"])
+
+ if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
+ self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
+ % (self.ssid, drive["driveRef"]))
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
+ % (self.ssid, drive["driveRef"], to_native(error)))
+
+ if drive_reference_list:
+ self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return self.upgrade_list_cache
+
+ def wait_for_upgrade_completion(self):
+ """Wait for drive firmware upgrade to complete."""
+ drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
+ last_status = None
+ for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)
+
+ # Check drive status
+ for status in response["driveStatus"]:
+ last_status = status
+ if status["driveRef"] in drive_references:
+ if status["status"] == "okay":
+ continue
+ elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
+ break
+ else:
+ self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
+ % (self.ssid, status["driveRef"], status["status"]))
+ else:
+ self.upgrade_in_progress = False
+ break
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ sleep(5)
+ else:
+ self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))
+
+ def upgrade(self):
+ """Apply firmware to applicable drives."""
+ try:
+ rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
+ % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
+ self.upgrade_in_progress = True
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ if self.wait_for_completion:
+ self.wait_for_upgrade_completion()
+
+ def apply(self):
+ """Apply firmware policy has been enforced on E-Series storage system."""
+ self.upload_firmware()
+
+ if self.upgrade_list() and not self.module.check_mode:
+ self.upgrade()
+
+ self.module.exit_json(changed=True if self.upgrade_list() else False,
+ upgrade_in_process=self.upgrade_in_progress)
+
+
+def main():
+ drive_firmware = NetAppESeriesDriveFirmware()
+ drive_firmware.apply()
+
+
+if __name__ == '__main__':
+ main()
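The wait_for_upgrade_completion() method above polls the drive firmware state endpoint every five seconds and uses Python's for/else construct to detect a timeout. Below is a minimal sketch of that polling pattern only; check_status() is a hypothetical stand-in for the firmware/drives/state request and is not part of the module.

# Sketch only: poll-until-done with a timeout, mirroring wait_for_upgrade_completion().
from time import sleep

WAIT_TIMEOUT_SEC = 60 * 15
POLL_INTERVAL_SEC = 5

def check_status():
    return True   # pretend every tracked drive already reports "okay"

def wait_for_completion():
    for attempt in range(int(WAIT_TIMEOUT_SEC / POLL_INTERVAL_SEC)):
        if check_status():
            break
        sleep(POLL_INTERVAL_SEC)
    else:
        # for/else: this branch runs only when the loop never hit "break", i.e. a timeout.
        raise RuntimeError("Timed out waiting for drive firmware upgrade.")

wait_for_completion()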
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py
new file mode 100644
index 000000000..32906e0d4
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py
@@ -0,0 +1,1185 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_santricity_facts
+short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+description:
+ - The na_santricity_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+'''
+
+EXAMPLES = """
+---
+- name: Get array facts
+ na_santricity_facts:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+"""
+
+RETURN = """
+ msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample:
+ - Gathered facts for storage array. Array ID [1].
+ - Gathered facts for web services proxy.
+ storage_array_facts:
+ description: provides details about the array, controllers, management interfaces, hostside interfaces,
+ driveside interfaces, disks, storage pools, volumes, snapshots, and features.
+ returned: on successful inquiry from embedded web services rest api
+ type: complex
+ contains:
+ netapp_controllers:
+ description: storage array controller list that contains basic controller identification and status
+ type: complex
+ sample:
+ - [{"name": "A", "serial": "021632007299", "status": "optimal"},
+ {"name": "B", "serial": "021632007300", "status": "failed"}]
+ netapp_disks:
+ description: drive list that contains identification, type, and status information for each drive
+ type: complex
+ sample:
+ - [{"available": false,
+ "firmware_version": "MS02",
+ "id": "01000000500003960C8B67880000000000000000",
+ "media_type": "ssd",
+ "product_id": "PX02SMU080 ",
+ "serial_number": "15R0A08LT2BA",
+ "status": "optimal",
+ "tray_ref": "0E00000000000000000000000000000000000000",
+ "usable_bytes": "799629205504" }]
+ netapp_driveside_interfaces:
+ description: drive side interface list that contains identification, type, and speed for each interface
+ type: complex
+ sample:
+ - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
+ - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
+ netapp_enabled_features:
+ description: specifies the enabled features on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
+ netapp_host_groups:
+ description: specifies the host groups on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
+ netapp_hosts:
+ description: specifies the hosts on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "8203800000000000000000000000000000000000",
+ "name": "host1",
+ "group_id": "85000000600A098000A4B28D003610705C40B964",
+ "host_type_index": 28,
+ "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
+ { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
+ netapp_host_types:
+ description: lists the available host types on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "index": 0, "type": "FactoryDefault" },
+ { "index": 1, "type": "W2KNETNCL"},
+ { "index": 2, "type": "SOL" },
+ { "index": 5, "type": "AVT_4M" },
+ { "index": 6, "type": "LNX" },
+ { "index": 7, "type": "LnxALUA" },
+ { "index": 8, "type": "W2KNETCL" },
+ { "index": 9, "type": "AIX MPIO" },
+ { "index": 10, "type": "VmwTPGSALUA" },
+ { "index": 15, "type": "HPXTPGS" },
+ { "index": 17, "type": "SolTPGSALUA" },
+ { "index": 18, "type": "SVC" },
+ { "index": 22, "type": "MacTPGSALUA" },
+ { "index": 23, "type": "WinTPGSALUA" },
+ { "index": 24, "type": "LnxTPGSALUA" },
+ { "index": 25, "type": "LnxTPGSALUA_PM" },
+ { "index": 26, "type": "ONTAP_ALUA" },
+ { "index": 27, "type": "LnxTPGSALUA_SF" },
+ { "index": 28, "type": "LnxDHALUA" },
+ { "index": 29, "type": "ATTOClusterAllOS" }]
+ netapp_hostside_interfaces:
+ description: host side interface list that contains identification, configuration, type, speed, and
+ status information for each interface
+ type: complex
+ sample:
+ - [{"iscsi":
+ [{ "controller": "A",
+ "current_interface_speed": "10g",
+ "ipv4_address": "10.10.10.1",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.10.10.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
+ "link_status": "up",
+ "mtu": 9000,
+ "supported_interface_speeds": [ "10g" ] }]}]
+ netapp_management_interfaces:
+ description: management interface list that contains identification, configuration, and status for
+ each interface
+ type: complex
+ sample:
+ - [{"alias": "ict-2800-A",
+ "channel": 1,
+ "controller": "A",
+ "dns_config_method": "dhcp",
+ "dns_servers": [],
+ "ipv4_address": "10.1.1.1",
+ "ipv4_address_config_method": "static",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.113.1.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "link_status": "up",
+ "mac_address": "00A098A81B5D",
+ "name": "wan0",
+ "ntp_config_method": "disabled",
+ "ntp_servers": [],
+ "remote_ssh_access": false }]
+ netapp_storage_array:
+ description: provides storage array identification, firmware version, and available capabilities
+ type: dict
+ sample:
+ - {"chassis_serial": "021540006043",
+ "firmware": "08.40.00.01",
+ "name": "ict-2800-11_40",
+ "wwn": "600A098000A81B5D0000000059D60C76",
+ "cacheBlockSizes": [4096,
+ 8192,
+ 16384,
+ 32768],
+ "supportedSegSizes": [8192,
+ 16384,
+ 32768,
+ 65536,
+ 131072,
+ 262144,
+ 524288]}
+ netapp_storage_pools:
+ description: storage pool list that contains identification and capacity information for each pool
+ type: complex
+ sample:
+ - [{"available_capacity": "3490353782784",
+ "id": "04000000600A098000A81B5D000002B45A953A61",
+ "name": "Raid6",
+ "total_capacity": "5399466745856",
+ "used_capacity": "1909112963072" }]
+ netapp_volumes:
+ description: storage volume list that contains identification and capacity information for each volume
+ type: complex
+ sample:
+ - [{"capacity": "5368709120",
+ "id": "02000000600A098000AAC0C3000002C45A952BAA",
+ "is_thin_provisioned": false,
+ "name": "5G",
+ "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
+ netapp_workload_tags:
+ description: workload tag list
+ type: complex
+ sample:
+ - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
+ "name": "ftp_server",
+ "workloadAttributes": [{"key": "use",
+ "value": "general"}]}]
+ netapp_volumes_by_initiators:
+ description: list of available volumes keyed by the mapped initiators.
+ type: complex
+ sample:
+ - {"beegfs_host": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
+ "meta_data": {"filetype": "ext4", "public": true},
+ "name": "some_volume",
+ "workload_name": "beegfs_metadata",
+ "workload_metadata": {"filetype": "ext4", "public": true},
+ "volume_metadata": '{"format_type": "ext4",
+ "format_options": "-i 2048 -I 512 -J size=400 -Odir_index,filetype",
+ "mount_options": "noatime,nodiratime,nobarrier,_netdev",
+ "mount_directory": "/data/beegfs/"}',
+ "host_types": ["nvmeof"],
+ "eui": "0000139A3885FA4500A0980000EAA272V",
+ "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
+ snapshot_images:
+ description: snapshot image list that contains identification, capacity, and status information for each
+ snapshot image
+ type: complex
+ sample:
+ - [{"active_cow": true,
+ "creation_method": "user",
+ "id": "34000000600A098000A81B5D00630A965B0535AC",
+ "pit_capacity": "5368709120",
+ "reposity_cap_utilization": "0",
+ "rollback_source": false,
+ "status": "optimal" }]
+ proxy_facts:
+ description: proxy storage system list
+ returned: on successful inquiry from web services proxy's rest api
+ type: complex
+ contains:
+ ssid:
+ description: storage system id
+ type: str
+ sample: "ec8ed9d2-eba3-4cac-88fb-0954f327f1d4"
+ name:
+ description: storage system name
+ type: str
+ sample: "EF570-NVMe"
+ wwn:
+ description: storage system unique identifier
+ type: str
+ sample: "AC1100051E1E1E1E1E1E1E1E1E1E1E1E"
+ model:
+ description: NetApp E-Series model number
+ type: str
+ sample: "5700"
+ controller:
+ description: controller list that contains identification, ip addresses, and certificate information for
+ each controller
+ type: complex
+ sample: [{"certificateStatus": "selfSigned",
+ "controllerId": "070000000000000000000001",
+ "ipAddresses": ["172.17.0.5", "3.3.3.3"]}]
+ drive_types:
+ description: all available storage system drive types
+ type: list
+ sample: ["sas", "fibre"]
+ unconfigured_space:
+ description: unconfigured storage system space in bytes
+ type: str
+ sample: "982259020595200"
+ array_status:
+ description: storage system status
+ type: str
+ sample: "optimal"
+ password_status:
+ description: storage system password status
+ type: str
+ sample: "invalid"
+ certificate_status:
+ description: storage system ssl certificate status
+ type: str
+ sample: "untrusted"
+ firmware_version:
+ description: storage system install firmware version
+ type: str
+ sample: "08.50.42.99"
+ chassis_serial:
+ description: storage system chassis serial number
+ type: str
+ sample: "SX0810032"
+ asup_enabled:
+ description: storage system auto-support status
+ type: bool
+ sample: True
+"""
+
+from datetime import datetime
+import re
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+class Facts(NetAppESeriesModule):
+ def __init__(self):
+ web_services_version = "02.00.0000.0000"
+ super(Facts, self).__init__(ansible_options={},
+ web_services_version=web_services_version,
+ supports_check_mode=True)
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller references to their labels."""
+ controllers = list()
+ try:
+ rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+ % (self.ssid, str(err)))
+
+ controllers.sort()
+
+ controllers_dict = {}
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[controller] = label
+ i += 1
+
+ return controllers_dict
+
+ def get_array_facts(self):
+ """Extract particular facts from the storage array graph"""
+ facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
+ controller_reference_label = self.get_controllers()
+ array_facts = None
+ hardware_inventory_facts = None
+
+ # Get the storage array graph
+ try:
+ rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
+
+ # Get the storage array hardware inventory
+ try:
+ rc, hardware_inventory_facts = self.request("storage-systems/%s/hardware-inventory" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to obtain hardware inventory from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
+
+ # Get storage system specific key-value pairs
+ key_value_url = "key-values"
+ key_values = []
+ if not self.is_embedded() and self.ssid.lower() not in ["0", "proxy"]:
+ key_value_url = "storage-systems/%s/forward/devmgr/v2/key-values" % self.ssid
+ try:
+ rc, key_values = self.request(key_value_url)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to obtain embedded key-value database. Array [%s]. Error [%s]" % (self.ssid, str(error)))
+
+ facts['netapp_storage_array'] = dict(
+ name=array_facts['sa']['saData']['storageArrayLabel'],
+ chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
+ firmware=array_facts['sa']['saData']['fwVersion'],
+ wwn=array_facts['sa']['saData']['saId']['worldWideName'],
+ segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
+ cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
+
+ facts['netapp_controllers'] = [
+ dict(
+ name=controller_reference_label[controller['controllerRef']],
+ serial=controller['serialNumber'].strip(),
+ status=controller['status'],
+ ) for controller in array_facts['controller']]
+
+ facts['netapp_hosts'] = [
+ dict(
+ group_id=host['clusterRef'],
+ hosts_reference=host['hostRef'],
+ id=host['id'],
+ name=host['name'],
+ host_type_index=host['hostTypeIndex'],
+ ports=host['hostSidePorts']
+ ) for host in array_facts['storagePoolBundle']['host']]
+
+ facts['netapp_host_groups'] = [
+ dict(
+ id=group['id'],
+ name=group['name'],
+ hosts=[host['name'] for host in facts['netapp_hosts'] if host['group_id'] == group['id']]
+ ) for group in array_facts['storagePoolBundle']['cluster']]
+ facts['netapp_host_groups'].append(dict(
+ id='0000000000000000000000000000000000000000',
+ name='default_hostgroup',
+ hosts=[host["name"] for host in facts['netapp_hosts'] if host['group_id'] == '0000000000000000000000000000000000000000']))
+
+ facts['netapp_host_types'] = [
+ dict(
+ type=host_type['hostType'],
+ index=host_type['index']
+ ) for host_type in array_facts['sa']['hostSpecificVals']
+ if 'hostType' in host_type.keys() and host_type['hostType']
+ # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
+ ]
+
+ facts['snapshot_images'] = [
+ dict(
+ id=snapshot['id'],
+ status=snapshot['status'],
+ pit_capacity=snapshot['pitCapacity'],
+ creation_method=snapshot['creationMethod'],
+ reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
+ active_cow=snapshot['activeCOW'],
+ rollback_source=snapshot['isRollbackSource']
+ ) for snapshot in array_facts['highLevelVolBundle']['pit']]
+
+ facts['netapp_disks'] = [
+ dict(
+ id=disk['id'],
+ available=disk['available'],
+ media_type=disk['driveMediaType'],
+ status=disk['status'],
+ usable_bytes=disk['usableCapacity'],
+ tray_ref=disk['physicalLocation']['trayRef'],
+ product_id=disk['productID'],
+ firmware_version=disk['firmwareVersion'],
+ serial_number=disk['serialNumber'].lstrip()
+ ) for disk in array_facts['drive']]
+
+ facts['netapp_management_interfaces'] = [
+ dict(controller=controller_reference_label[controller['controllerRef']],
+ name=iface['ethernet']['interfaceName'],
+ alias=iface['ethernet']['alias'],
+ channel=iface['ethernet']['channel'],
+ mac_address=iface['ethernet']['macAddr'],
+ remote_ssh_access=iface['ethernet']['rloginEnabled'],
+ link_status=iface['ethernet']['linkStatus'],
+ ipv4_enabled=iface['ethernet']['ipv4Enabled'],
+ ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
+ ipv4_address=iface['ethernet']['ipv4Address'],
+ ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
+ ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
+ ipv6_enabled=iface['ethernet']['ipv6Enabled'],
+ dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
+ dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
+ if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
+ ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
+ ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
+ if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
+ ) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
+
+ facts['netapp_hostside_interfaces'] = [
+ dict(
+ fc=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['fibre']['channel'],
+ link_status=iface['fibre']['linkStatus'],
+ current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'fc'],
+ ib=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['ib']['channel'],
+ link_status=iface['ib']['linkState'],
+ mtu=iface['ib']['maximumTransmissionUnit'],
+ current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'ib'],
+ iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
+ iqn=iface['iscsi']['iqn'],
+ link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
+ ipv4_enabled=iface['iscsi']['ipv4Enabled'],
+ ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
+ ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
+ ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
+ ipv6_enabled=iface['iscsi']['ipv6Enabled'],
+ mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
+ current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
+ ['ethernetData']['currentInterfaceSpeed']),
+ supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
+ ['ethernetData']
+ ['supportedInterfaceSpeeds']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'iscsi' and iface['iscsi']['interfaceData']['type'] == 'ethernet'],
+ sas=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['sas']['channel'],
+ current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
+ link_status=iface['sas']['iocPort']['state'])
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'sas'])]
+
+ facts['netapp_driveside_interfaces'] = [
+ dict(
+ controller=controller_reference_label[controller['controllerRef']],
+ interface_type=interface['interfaceType'],
+ interface_speed=strip_interface_speed(
+ interface[interface['interfaceType']]['maximumInterfaceSpeed']
+ if (interface['interfaceType'] == 'sata' or
+ interface['interfaceType'] == 'sas' or
+ interface['interfaceType'] == 'fibre')
+ else (
+ interface[interface['interfaceType']]['currentSpeed']
+ if interface['interfaceType'] == 'ib'
+ else (
+ interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
+ if interface['interfaceType'] == 'iscsi' else 'unknown'
+ ))),
+ )
+ for controller in array_facts['controller']
+ for interface in controller['driveInterfaces']]
+
+ facts['netapp_storage_pools'] = [
+ dict(
+ id=storage_pool['id'],
+ name=storage_pool['name'],
+ available_capacity=storage_pool['freeSpace'],
+ total_capacity=storage_pool['totalRaidedSpace'],
+ used_capacity=storage_pool['usedSpace']
+ ) for storage_pool in array_facts['volumeGroup']]
+
+ all_volumes = list(array_facts['volume'] + array_facts['highLevelVolBundle']['thinVolume'])
+
+ facts['netapp_volumes'] = [
+ dict(
+ id=v['id'],
+ name=v['name'],
+ parent_storage_pool_id=v['volumeGroupRef'],
+ capacity=v['capacity'],
+ is_thin_provisioned=v['thinProvisioned'],
+ workload=v['metadata'],
+ ) for v in all_volumes]
+
+ # Add access volume information to volumes when enabled.
+ if array_facts['sa']['accessVolume']['enabled']:
+ facts['netapp_volumes'].append(dict(
+ id=array_facts['sa']['accessVolume']['id'],
+ name="access_volume",
+ parent_storage_pool_id="",
+ capacity=array_facts['sa']['accessVolume']['capacity'],
+ is_thin_provisioned=False,
+ workload=""))
+
+ facts['netapp_snapshot_consistency_groups'] = []
+ for group in array_facts["highLevelVolBundle"]["pitConsistencyGroup"]:
+ reserve_capacity_full_policy = "purge" if group["repFullPolicy"] == "purgepit" else "reject"
+ group_info = {"id": group["id"],
+ "name": group["name"],
+ "reserve_capacity_full_policy": reserve_capacity_full_policy,
+ "rollback_priority": group["rollbackPriority"],
+ "base_volumes": [],
+ "pit_images": [],
+ "pit_views": {}}
+
+ # Determine all consistency group base volumes.
+ volumes_by_id = {}
+ for pit_group in array_facts["highLevelVolBundle"]["pitGroup"]:
+ if pit_group["consistencyGroupRef"] == group["id"]:
+ for volume in array_facts["volume"]:
+ if volume["id"] == pit_group["baseVolume"]:
+ volumes_by_id.update({volume["id"]: volume["name"]})
+ group_info["base_volumes"].append({"id": volume["id"],
+ "name": volume["name"],
+ "reserve_capacity_volume_id": pit_group["repositoryVolume"]})
+ break
+
+ # Determine all consistency group pit snapshot images.
+ group_pit_key_values = {}
+ for entry in key_values:
+ if re.search("ansible\\|%s\\|" % group["name"], entry["key"]):
+ pit_name = entry["key"].replace("ansible|%s|" % group["name"], "")
+ pit_values = entry["value"].split("|")
+ if len(pit_values) == 3:
+ timestamp, image_id, description = pit_values
+ group_pit_key_values.update({timestamp: {"name": pit_name, "description": description}})
+
+ pit_by_id = {}
+ for pit in array_facts["highLevelVolBundle"]["pit"]:
+ if pit["consistencyGroupId"] == group["id"]:
+
+ if pit["pitTimestamp"] in group_pit_key_values.keys():
+ pit_image = {"name": group_pit_key_values[pit["pitTimestamp"]]["name"],
+ "description": group_pit_key_values[pit["pitTimestamp"]]["description"],
+ "timestamp": datetime.fromtimestamp(int(pit["pitTimestamp"])).strftime("%Y-%m-%d %H:%M:%S")}
+ else:
+ pit_image = {"name": "", "description": "",
+ "timestamp": datetime.fromtimestamp(int(pit["pitTimestamp"])).strftime("%Y-%m-%d %H:%M:%S")}
+ group_info["pit_images"].append(pit_image)
+ pit_by_id.update({pit["id"]: pit_image})
+
+ # Determine all consistency group pit views.
+ for view in array_facts["highLevelVolBundle"]["pitView"]:
+ if view["consistencyGroupId"] == group["id"]:
+ view_timestamp = datetime.fromtimestamp(int(view["viewTime"])).strftime("%Y-%m-%d %H:%M:%S")
+ reserve_capacity_pct = int(round(float(view["repositoryCapacity"]) / float(view["baseVolumeCapacity"]) * 100))
+ if view_timestamp in group_info["pit_views"].keys():
+ group_info["pit_views"][view_timestamp]["volumes"].append({"name": view["name"],
+ "base_volume": volumes_by_id[view["baseVol"]],
+ "writable": view["accessMode"] == "readWrite",
+ "reserve_capacity_pct": reserve_capacity_pct,
+ "status": view["status"]})
+ else:
+ group_info["pit_views"].update({view_timestamp: {"name": pit_by_id[view["basePIT"]]["name"],
+ "description": pit_by_id[view["basePIT"]]["description"],
+ "volumes": [{"name": view["name"],
+ "base_volume": volumes_by_id[view["baseVol"]],
+ "writable": view["accessMode"] == "readWrite",
+ "reserve_capacity_pct": reserve_capacity_pct,
+ "status": view["status"]}]}})
+
+ facts['netapp_snapshot_consistency_groups'].append(group_info)
+
+ lun_mappings = dict()
+ for host in facts['netapp_hosts']:
+ lun_mappings.update({host["name"]: []})
+ for host in facts['netapp_host_groups']:
+ lun_mappings.update({host["name"]: []})
+
+ facts['netapp_default_hostgroup_access_volume_lun'] = None
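+        # A 'type: all' mapping against the all-zero reference targets the default host group; its LUN is reported as the access volume LUN.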
+ for lun in [a['lun'] for a in array_facts['storagePoolBundle']['lunMapping']
+ if a['type'] == 'all' and a['mapRef'] == '0000000000000000000000000000000000000000']:
+ facts['netapp_default_hostgroup_access_volume_lun'] = lun
+
+ # Get all host mappings
+ host_mappings = dict()
+ for host_mapping in [h for h in array_facts['storagePoolBundle']['lunMapping'] if h['type'] == 'host']:
+ for host_name in [h['name'] for h in facts['netapp_hosts'] if h['id'] == host_mapping['mapRef']]:
+ for volume in [v['name'] for v in facts['netapp_volumes'] if v['id'] == host_mapping['volumeRef']]:
+ if host_name in host_mappings.keys():
+ host_mappings[host_name].append((volume, host_mapping['lun']))
+ else:
+ host_mappings[host_name] = [(volume, host_mapping['lun'])]
+
+ # Get all host group mappings
+ group_mappings = dict()
+ for group_mapping in [h for h in array_facts['storagePoolBundle']['lunMapping'] if h['type'] == 'cluster']:
+ for group_name, group_hosts in [(g['name'], g['hosts']) for g in facts['netapp_host_groups'] if g['id'] == group_mapping['mapRef']]:
+ for volume in [v['name'] for v in facts['netapp_volumes'] if v['id'] == group_mapping['volumeRef']]:
+ if group_name in group_mappings.keys():
+ group_mappings[group_name].append((volume, group_mapping['lun']))
+ else:
+ group_mappings[group_name] = [(volume, group_mapping['lun'])]
+
+ for host_name in [h for h in group_hosts if h in host_mappings.keys()]:
+ if host_name in host_mappings.keys():
+ host_mappings[host_name].append((volume, group_mapping['lun']))
+ else:
+ host_mappings[host_name] = [(volume, group_mapping['lun'])]
+
+ facts['netapp_luns_by_target'] = lun_mappings
+ if host_mappings:
+ facts['netapp_luns_by_target'].update(host_mappings)
+ if group_mappings:
+ facts['netapp_luns_by_target'].update(group_mappings)
+
+ # Add all host mappings to respective groups mappings
+ for host_group in facts['netapp_host_groups']:
+ group_name = host_group['name']
+ for host in host_group['hosts']:
+ facts['netapp_luns_by_target'][group_name].extend(facts['netapp_luns_by_target'][host])
+
+ # Remove duplicate entries
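+        # Round-tripping the (volume, LUN) tuples through a dict keeps a single entry per volume name.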
+ for obj in facts['netapp_luns_by_target'].keys():
+ tmp = dict(facts['netapp_luns_by_target'][obj])
+ facts['netapp_luns_by_target'][obj] = [(k, tmp[k]) for k in tmp.keys()]
+
+ workload_tags = None
+ try:
+ rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
+
+ facts['netapp_workload_tags'] = [
+ dict(
+ id=workload_tag['id'],
+ name=workload_tag['name'],
+ attributes=workload_tag['workloadAttributes']
+ ) for workload_tag in workload_tags]
+
+ targets = array_facts["storagePoolBundle"]["target"]
+
+ facts['netapp_hostside_io_interfaces'] = []
+ if "ioInterface" in array_facts:
+ for interface in array_facts["ioInterface"]:
+
+ # Select only the host side channels
+ if interface["channelType"] == "hostside":
+ interface_type = interface["ioInterfaceTypeData"]["interfaceType"]
+ if interface_type == "fibre":
+ interface_type = "fc"
+ elif interface_type == "nvmeCouplingDriver":
+ interface_type = "couplingDriverNvme"
+
+ interface_data = interface["ioInterfaceTypeData"][interface_type]
+ command_protocol_properties = interface["commandProtocolPropertiesList"]["commandProtocolProperties"]
+
+ # Build generic information for each interface entry
+ interface_info = {"protocol": "unknown",
+ "interface_reference": interface_data["interfaceRef"],
+ "controller_reference": interface["controllerRef"],
+ "channel_port_reference": interface_data["channelPortRef"] if "channelPortRef" in interface_data else "",
+ "controller": controller_reference_label[interface["controllerRef"]],
+ "channel": interface_data["channel"],
+ "part": "unknown",
+ "link_status": "unknown",
+ "speed": {"current": "unknown", "maximum": "unknown", "supported": []},
+ "mtu": None,
+ "guid": None,
+ "lid": None,
+ "nqn": None,
+ "iqn": None,
+ "wwnn": None,
+ "wwpn": None,
+ "ipv4": None, # enabled, config_method, address, subnet, gateway
+ "ipv6": None} # for expansion if needed
+
+ # Determine storage target identifiers
+ controller_iqn = "unknown"
+ controller_nqn = "unknown"
+ for target in targets:
+ if target["nodeName"]["ioInterfaceType"] == "nvmeof":
+ controller_nqn = target["nodeName"]["nvmeNodeName"]
+ if target["nodeName"]["ioInterfaceType"] == "iscsi":
+ controller_iqn = target["nodeName"]["iscsiNodeName"]
+
+ # iSCSI IO interface
+ if interface_type == "iscsi":
+ interface_info.update({"ipv4": {"enabled": interface_data["ipv4Enabled"],
+ "config_method": interface_data["ipv4Data"]["ipv4AddressConfigMethod"],
+ "address": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4Address"],
+ "subnet": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"]}})
+
+ # InfiniBand (iSER) protocol
+ if interface_data["interfaceData"]["type"] == "infiniband" and interface_data["interfaceData"]["infinibandData"]["isIser"]:
+ interface_info.update({"protocol": "ib_iser",
+ "iqn": controller_iqn})
+
+ # Get more details from hardware-inventory
+ for ib_port in hardware_inventory_facts["ibPorts"]:
+ if ib_port["channelPortRef"] == interface_info["channel_port_reference"]:
+ interface_info.update({"link_status": ib_port["linkState"],
+ "guid": ib_port["globalIdentifier"],
+ "lid": ib_port["localIdentifier"],
+ "speed": {"current": strip_interface_speed(ib_port["currentSpeed"]),
+ "maximum": strip_interface_speed(ib_port["supportedSpeed"])[-1],
+ "supported": strip_interface_speed(ib_port["supportedSpeed"])}})
+
+ # iSCSI protocol
+ elif interface_data["interfaceData"]["type"] == "ethernet":
+ ethernet_data = interface_data["interfaceData"]["ethernetData"]
+ interface_info.update({"protocol": "iscsi",
+ "iqn": controller_iqn})
+ interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]),
+ "link_status": ethernet_data["linkStatus"],
+ "mtu": ethernet_data["maximumFramePayloadSize"],
+ "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]),
+ "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}})
+
+ # Fibre Channel IO interface
+ elif interface_type == "fc":
+ interface_info.update({"wwnn": interface_data["nodeName"],
+ "wwpn": interface_data["addressId"],
+ "part": interface_data["part"],
+ "link_status": interface_data["linkStatus"],
+ "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]),
+ "supported": "unknown"}})
+
+ # NVMe over fibre channel protocol
+ if (command_protocol_properties and command_protocol_properties[0]["commandProtocol"] == "nvme" and
+ command_protocol_properties[0]["nvmeProperties"]["commandSet"] == "nvmeof" and
+ command_protocol_properties[0]["nvmeProperties"]["nvmeofProperties"]["fcProperties"]):
+ interface_info.update({"protocol": "nvme_fc",
+ "nqn": controller_nqn})
+
+ # Fibre channel protocol
+ else:
+ interface_info.update({"protocol": "fc"})
+
+ # SAS IO interface
+ elif interface_type == "sas":
+ interface_info.update({"protocol": "sas",
+ "wwpn": interface_data["addressId"],
+ "part": interface_data["part"],
+ "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]),
+ "supported": "unknown"}})
+
+ # Infiniband IO interface
+ elif interface_type == "ib":
+ interface_info.update({"link_status": interface_data["linkState"],
+ "speed": {"current": strip_interface_speed(interface_data["currentSpeed"]),
+ "maximum": strip_interface_speed(interface_data["supportedSpeed"])[-1],
+ "supported": strip_interface_speed(interface_data["supportedSpeed"])},
+ "mtu": interface_data["maximumTransmissionUnit"],
+ "guid": interface_data["globalIdentifier"],
+ "lid": interface_data["localIdentifier"]})
+
+ # Determine protocol (NVMe over Infiniband, InfiniBand iSER, InfiniBand SRP)
+ if interface_data["isNVMeSupported"]:
+ interface_info.update({"protocol": "nvme_ib",
+ "nqn": controller_nqn})
+ elif interface_data["isISERSupported"]:
+ interface_info.update({"protocol": "ib_iser",
+ "iqn": controller_iqn})
+ elif interface_data["isSRPSupported"]:
+ interface_info.update({"protocol": "ib_srp"})
+
+ # Determine command protocol information
+ if command_protocol_properties:
+ for command_protocol_property in command_protocol_properties:
+ if command_protocol_property["commandProtocol"] == "nvme":
+ if command_protocol_property["nvmeProperties"]["commandSet"] == "nvmeof":
+ ip_address_data = command_protocol_property["nvmeProperties"]["nvmeofProperties"]["ibProperties"]["ipAddressData"]
+ if ip_address_data["addressType"] == "ipv4":
+ interface_info.update({"ipv4": {"enabled": True,
+ "config_method": "configStatic",
+ "address": ip_address_data["ipv4Data"]["ipv4Address"],
+ "subnet": ip_address_data["ipv4Data"]["ipv4SubnetMask"],
+ "gateway": ip_address_data["ipv4Data"]["ipv4GatewayAddress"]}})
+
+ elif command_protocol_property["commandProtocol"] == "scsi":
+ if command_protocol_property["scsiProperties"]["scsiProtocolType"] == "iser":
+ ipv4_data = command_protocol_property["scsiProperties"]["iserProperties"]["ipv4Data"]
+ interface_info.update({"ipv4": {"enabled": True,
+ "config_method": ipv4_data["ipv4AddressConfigMethod"],
+ "address": ipv4_data["ipv4AddressData"]["ipv4Address"],
+ "subnet": ipv4_data["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": ipv4_data["ipv4AddressData"]["ipv4GatewayAddress"]}})
+
+ # Ethernet IO interface
+ elif interface_type == "ethernet":
+ ethernet_data = interface_data["interfaceData"]["ethernetData"]
+ interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]),
+ "link_status": ethernet_data["linkStatus"],
+ "mtu": ethernet_data["maximumFramePayloadSize"],
+ "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]),
+ "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}})
+
+ # Determine command protocol information
+ if command_protocol_properties:
+ for command_protocol_property in command_protocol_properties:
+ if command_protocol_property["commandProtocol"] == "nvme":
+ if command_protocol_property["nvmeProperties"]["commandSet"] == "nvmeof":
+
+ nvmeof_properties = command_protocol_property["nvmeProperties"]["nvmeofProperties"]
+ if nvmeof_properties["provider"] == "providerRocev2":
+ ipv4_data = nvmeof_properties["roceV2Properties"]["ipv4Data"]
+ interface_info.update({"protocol": "nvme_roce",
+ "nqn": controller_nqn})
+ interface_info.update({"ipv4": {"enabled": nvmeof_properties["roceV2Properties"]["ipv4Enabled"],
+ "config_method": ipv4_data["ipv4AddressConfigMethod"],
+ "address": ipv4_data["ipv4AddressData"]["ipv4Address"],
+ "subnet": ipv4_data["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": ipv4_data["ipv4AddressData"]["ipv4GatewayAddress"]}})
+
+ facts['netapp_hostside_io_interfaces'].append(interface_info)
+
+        # Gather information from controller->hostInterfaces if available (this is a deprecated data structure; prefer information from ioInterface).
+ for controller in array_facts['controller']:
+ if "hostInterfaces" in controller.keys():
+ for interface in controller['hostInterfaces']:
+
+                    # Ignore any issues with this data structure since it is deprecated.
+ try:
+ interface_type = interface["interfaceType"]
+ interface_data = interface["fibre" if interface_type == "fc" else interface_type]
+
+ # Build generic information for each interface entry
+ interface_info = {"protocol": "unknown",
+ "interface_reference": interface_data["interfaceRef"],
+ "controller_reference": controller["controllerRef"],
+ "channel_port_reference": interface_data["channelPortRef"] if "channelPortRef" in interface_data else "",
+ "controller": controller_reference_label[controller["controllerRef"]],
+ "channel": interface_data["channel"],
+ "part": "unknown",
+ "link_status": "unknown",
+ "speed": {"current": "unknown", "maximum": "unknown", "supported": []},
+ "mtu": None,
+ "guid": None,
+ "lid": None,
+ "nqn": None,
+ "iqn": None,
+ "wwnn": None,
+ "wwpn": None,
+ "ipv4": None, # enabled, config_method, address, subnet, gateway
+ "ipv6": None} # for expansion if needed
+
+ # Add target information
+ for target in targets:
+ if target["nodeName"]["ioInterfaceType"] == "nvmeof":
+ interface_info.update({"nqn": target["nodeName"]["nvmeNodeName"]})
+ if target["nodeName"]["ioInterfaceType"] == "iscsi":
+ interface_info.update({"iqn": target["nodeName"]["iscsiNodeName"]})
+
+ # iSCSI IO interface
+ if interface_type == "iscsi":
+ interface_info.update({"ipv4": {"enabled": interface_data["ipv4Enabled"],
+ "config_method": interface_data["ipv4Data"]["ipv4AddressConfigMethod"],
+ "address": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4Address"],
+ "subnet": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"]}})
+ # InfiniBand (iSER) protocol
+ if interface_data["interfaceData"]["type"] == "infiniband" and interface_data["interfaceData"]["infinibandData"]["isIser"]:
+ interface_info.update({"protocol": "ib_iser"})
+
+ # Get more details from hardware-inventory
+ for ib_port in hardware_inventory_facts["ibPorts"]:
+ if ib_port["channelPortRef"] == interface_info["channel_port_reference"]:
+ interface_info.update({"link_status": ib_port["linkState"],
+ "guid": ib_port["globalIdentifier"],
+ "lid": ib_port["localIdentifier"],
+ "speed": {"current": strip_interface_speed(ib_port["currentSpeed"]),
+ "maximum": strip_interface_speed(ib_port["supportedSpeed"])[-1],
+ "supported": strip_interface_speed(ib_port["supportedSpeed"])}})
+ # iSCSI protocol
+ elif interface_data["interfaceData"]["type"] == "ethernet":
+ ethernet_data = interface_data["interfaceData"]["ethernetData"]
+ interface_info.update({"protocol": "iscsi"})
+ interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]),
+ "link_status": ethernet_data["linkStatus"],
+ "mtu": ethernet_data["maximumFramePayloadSize"],
+ "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]),
+ "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}})
+ # Fibre Channel IO interface
+ elif interface_type == "fc":
+ interface_info.update({"protocol": "fc",
+ "wwnn": interface_data["nodeName"],
+ "wwpn": interface_data["addressId"],
+ "link_status": interface_data["linkStatus"],
+ "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]),
+ "supported": "unknown"}})
+ # SAS IO interface
+ elif interface_type == "sas":
+ interface_info.update({"protocol": "sas",
+ "wwpn": interface_data["iocPort"]["portTypeData"]["portIdentifier"],
+ "part": interface_data["part"],
+ "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]),
+ "supported": "unknown"}})
+ # Infiniband IO interface
+ elif interface_type == "ib":
+ interface_info.update({"link_status": interface_data["linkState"],
+ "speed": {"current": strip_interface_speed(interface_data["currentSpeed"]),
+ "maximum": strip_interface_speed(interface_data["supportedSpeed"])[-1],
+ "supported": strip_interface_speed(interface_data["supportedSpeed"])},
+ "mtu": interface_data["maximumTransmissionUnit"],
+ "guid": interface_data["globalIdentifier"],
+ "lid": interface_data["localIdentifier"]})
+
+ # Determine protocol (NVMe over Infiniband, InfiniBand iSER, InfiniBand SRP)
+ if interface_data["isNVMeSupported"]:
+ interface_info.update({"protocol": "nvme_ib"})
+ elif interface_data["isISERSupported"]:
+ interface_info.update({"protocol": "ib_iser"})
+ elif interface_data["isSRPSupported"]:
+ interface_info.update({"protocol": "ib_srp"})
+
+ # Ethernet IO interface
+ elif interface_type == "ethernet":
+ ethernet_data = interface_data["interfaceData"]["ethernetData"]
+ interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]),
+ "link_status": ethernet_data["linkStatus"],
+ "mtu": ethernet_data["maximumFramePayloadSize"],
+ "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]),
+ "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}})
+
+ # Only add interface if not already added (i.e. was part of ioInterface structure)
+ for existing_hostside_io_interfaces in facts['netapp_hostside_io_interfaces']:
+ if existing_hostside_io_interfaces["interface_reference"] == interface_info["interface_reference"]:
+ break
+ else:
+ facts['netapp_hostside_io_interfaces'].append(interface_info)
+ except Exception as error:
+ pass
+
+ # Create a dictionary of volume lists keyed by host names
+ facts['netapp_volumes_by_initiators'] = dict()
+ for mapping in array_facts['storagePoolBundle']['lunMapping']:
+ for host in facts['netapp_hosts']:
+ if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
+ if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
+ facts['netapp_volumes_by_initiators'].update({host['name']: []})
+
+ # Determine host io interface protocols
+ host_types = [port['type'] for port in host['ports']]
+ hostside_io_interface_protocols = []
+ host_port_protocols = []
+ host_port_information = {}
+ for interface in facts['netapp_hostside_io_interfaces']:
+ hostside_io_interface_protocols.append(interface["protocol"])
+ for host_type in host_types:
+ if host_type == "iscsi" and interface["protocol"] in ["iscsi", "ib_iser"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ # Skip duplicate entries into host_port_information
+ for host_port_info in host_port_information[interface["protocol"]]:
+ if interface["interface_reference"] == host_port_info["interface_reference"]:
+ break
+ else:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+
+ elif host_type == "fc" and interface["protocol"] in ["fc"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ # Skip duplicate entries into host_port_information
+ for host_port_info in host_port_information[interface["protocol"]]:
+ if interface["interface_reference"] == host_port_info["interface_reference"]:
+ break
+ else:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+
+ elif host_type == "sas" and interface["protocol"] in ["sas"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ # Skip duplicate entries into host_port_information
+ for host_port_info in host_port_information[interface["protocol"]]:
+ if interface["interface_reference"] == host_port_info["interface_reference"]:
+ break
+ else:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+
+ elif host_type == "ib" and interface["protocol"] in ["ib_iser", "ib_srp"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ # Skip duplicate entries into host_port_information
+ for host_port_info in host_port_information[interface["protocol"]]:
+ if interface["interface_reference"] == host_port_info["interface_reference"]:
+ break
+ else:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+
+ elif host_type == "nvmeof" and interface["protocol"] in ["nvme_ib", "nvme_fc", "nvme_roce"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ # Skip duplicate entries into host_port_information
+ for host_port_info in host_port_information[interface["protocol"]]:
+ if interface["interface_reference"] == host_port_info["interface_reference"]:
+ break
+ else:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+
+ for volume in all_volumes:
+ storage_pool = [pool["name"] for pool in facts['netapp_storage_pools'] if pool["id"] == volume["volumeGroupRef"]][0]
+
+ if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
+
+ # Determine workload name if there is one
+ workload_name = ""
+ metadata = dict()
+ for volume_tag in volume['metadata']:
+ if volume_tag['key'] == 'workloadId':
+ for workload_tag in facts['netapp_workload_tags']:
+ if volume_tag['value'] == workload_tag['id']:
+ workload_name = workload_tag['name']
+ metadata = dict((entry['key'], entry['value'])
+ for entry in workload_tag['attributes']
+ if entry['key'] != 'profileId')
+
+ # Get volume specific metadata tags
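+                            # Keys suffixed with ~0, ~1, ... carry chunks of a single value; strip the suffix and concatenate the chunks in key order.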
+ volume_metadata_raw = dict()
+ volume_metadata = dict()
+ for entry in volume['metadata']:
+ volume_metadata_raw.update({entry["key"]: entry["value"]})
+
+ for sorted_key in sorted(volume_metadata_raw.keys()):
+ if re.match(".*~[0-9]$", sorted_key):
+ key = re.sub("~[0-9]$", "", sorted_key)
+ if key in volume_metadata:
+ volume_metadata[key] = volume_metadata[key] + volume_metadata_raw[sorted_key]
+ else:
+ volume_metadata.update({key: volume_metadata_raw[sorted_key]})
+ else:
+ volume_metadata.update({sorted_key: volume_metadata_raw[sorted_key]})
+
+ # Determine drive count
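+                            # Stripe count excludes parity/mirror drives: disk pools assume a fixed 8-drive stripe, RAID 1 mirrors drive pairs,
+                            # RAID 3/5 reserve one parity drive, and RAID 6 reserves two.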
+ stripe_count = 0
+ vg_drive_num = sum(1 for d in array_facts['drive'] if d['currentVolumeGroupRef'] == volume['volumeGroupRef'] and not d['hotSpare'])
+
+ if volume['raidLevel'] == "raidDiskPool":
+ stripe_count = 8
+ if volume['raidLevel'] == "raid0":
+ stripe_count = vg_drive_num
+ if volume['raidLevel'] == "raid1":
+ stripe_count = int(vg_drive_num / 2)
+ if volume['raidLevel'] in ["raid3", "raid5"]:
+ stripe_count = vg_drive_num - 1
+ if volume['raidLevel'] == "raid6":
+ stripe_count = vg_drive_num - 2
+
+ volume_info = {"type": volume['objectType'],
+ "name": volume['name'],
+ "storage_pool": storage_pool,
+ "host_types": set(host_types),
+ "host_port_information": host_port_information,
+ "host_port_protocols": set(host_port_protocols),
+ "hostside_io_interface_protocols": set(hostside_io_interface_protocols),
+ "id": volume['id'],
+ "wwn": volume['wwn'],
+ "eui": volume['extendedUniqueIdentifier'],
+ "workload_name": workload_name,
+ "workload_metadata": metadata,
+ "meta_data": metadata,
+ "volume_metadata": volume_metadata,
+ "raid_level": volume['raidLevel'],
+ "segment_size_kb": int(volume['segmentSize'] / 1024),
+ "stripe_count": stripe_count}
+ facts['netapp_volumes_by_initiators'][host['name']].append(volume_info)
+
+ # Use the base volume to populate related details for snapshot volumes.
+ for pit_view_volume in array_facts["highLevelVolBundle"]["pitView"]:
+ if volume["id"] == pit_view_volume["baseVol"]:
+ pit_view_volume_info = volume_info.copy()
+ pit_view_volume_info.update({"type": pit_view_volume["objectType"],
+ "name": pit_view_volume['name'],
+ "id": pit_view_volume['id'],
+ "wwn": pit_view_volume['wwn'],
+ "eui": pit_view_volume['extendedUniqueIdentifier']})
+ facts['netapp_volumes_by_initiators'][host['name']].append(pit_view_volume_info)
+
+ features = [feature for feature in array_facts['sa']['capabilities']]
+ features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
+ if feature['isEnabled']])
+ features = list(set(features)) # ensure unique
+ features.sort()
+ facts['netapp_enabled_features'] = features
+
+ return facts
+
+ def get_facts(self):
+ """Get the embedded or web services proxy information."""
+ facts = self.get_array_facts()
+
+ facts_from_proxy = not self.is_embedded()
+ facts.update({"facts_from_proxy": facts_from_proxy})
+
+ self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
+ storage_array_facts=facts)
+
+
+def strip_interface_speed(speed):
+ """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
+ if isinstance(speed, list):
+ result = [re.match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
+ result = [sp.group().replace("speed", "") if result else "unknown" for sp in result if sp]
+ result = ["auto" if re.match(r"auto", sp) else sp for sp in result]
+ else:
+ result = re.match(r"speed[0-9]{1,3}[gm]", speed)
+ result = result.group().replace("speed", "") if result else "unknown"
+ result = "auto" if re.match(r"auto", result.lower()) else result
+ return result
+
+
+def main():
+ facts = Facts()
+ facts.get_facts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py
new file mode 100644
index 000000000..fb7922362
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_firmware
+short_description: NetApp E-Series manage firmware.
+description:
+    - Ensure specific firmware versions are activated on an E-Series storage system.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ nvsram:
+ description:
+ - Path to the NVSRAM file.
+ - NetApp recommends upgrading the NVSRAM when upgrading firmware.
+ - Due to concurrency issues, use M(na_santricity_proxy_firmware_upload) to upload firmware and nvsram to SANtricity Web Services Proxy when
+ upgrading multiple systems at the same time on the same instance of the proxy.
+ type: str
+ required: false
+ firmware:
+ description:
+ - Path to the firmware file.
+ - Due to concurrency issues, use M(na_santricity_proxy_firmware_upload) to upload firmware and nvsram to SANtricity Web Services Proxy when
+ upgrading multiple systems at the same time on the same instance of the proxy.
+ type: str
+ required: True
+ wait_for_completion:
+ description:
+      - This flag will cause the module to wait for any upgrade actions to complete.
+      - When changes are required to both the firmware and NVSRAM and the task is executed against the SANtricity Web Services Proxy,
+        the firmware upgrade must complete before the NVSRAM can be installed.
+ type: bool
+ default: false
+ clear_mel_events:
+ description:
+      - This flag will force the firmware to be activated in spite of storage system MEL event issues.
+      - Warning! This will clear all storage system MEL events. Use at your own risk!
+ type: bool
+ default: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ na_santricity_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/bundle"
+ wait_for_completion: true
+ clear_mel_events: true
+- name: Ensure correct firmware versions
+ na_santricity_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/firmware"
+"""
+RETURN = """
+msg:
+ description: Status and version of firmware and NVSRAM.
+ type: str
+ returned: always
+ sample:
+"""
+import os
+import multiprocessing
+import threading
+
+from time import sleep
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesFirmware(NetAppESeriesModule):
+ COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
+ REBOOT_TIMEOUT_SEC = 30 * 60
+ MINIMUM_PROXY_VERSION = "04.10.00.0000"
+
+ def __init__(self):
+ ansible_options = dict(
+ nvsram=dict(type="str", required=False),
+ firmware=dict(type="str", required=True),
+ wait_for_completion=dict(type="bool", default=False),
+ clear_mel_events=dict(type="bool", default=False))
+
+ super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.nvsram = args["nvsram"]
+ self.firmware = args["firmware"]
+ self.wait_for_completion = args["wait_for_completion"]
+ self.clear_mel_events = args["clear_mel_events"]
+
+ self.nvsram_name = None
+ self.firmware_name = None
+ self.is_bundle_cache = None
+ self.firmware_version_cache = None
+ self.nvsram_version_cache = None
+ self.upgrade_required = False
+ self.upgrade_in_progress = False
+ self.module_info = dict()
+
+ if self.nvsram:
+ self.nvsram_name = os.path.basename(self.nvsram)
+ if self.firmware:
+ self.firmware_name = os.path.basename(self.firmware)
+
+ self.last_known_event = -1
+ self.is_firmware_activation_started_mel_event_count = 1
+ self.is_nvsram_download_completed_mel_event_count = 1
+ self.proxy_wait_for_upgrade_mel_event_count = 1
+
+ def is_upgrade_in_progress(self):
+ """Determine whether an upgrade is already in progress."""
+ in_progress = False
+
+ if self.is_proxy():
+ try:
+ rc, status = self.request("storage-systems/%s/cfw-upgrade" % self.ssid)
+ in_progress = status["running"]
+ except Exception as error:
+ if "errorMessage" in to_native(error):
+ self.module.warn("Failed to retrieve upgrade status. Array [%s]. Error [%s]." % (self.ssid, error))
+ in_progress = False
+ else:
+ self.module.fail_json(msg="Failed to retrieve upgrade status. Array [%s]. Error [%s]." % (self.ssid, error))
+ else:
+ in_progress = False
+
+ return in_progress
+
+ def is_firmware_bundled(self):
+        """Determine whether the supplied firmware is a bundle."""
+ if self.is_bundle_cache is None:
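+            # The file signature (first 16 bytes) distinguishes a single firmware file ("firmware") from a bundle ("combined_content").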
+ with open(self.firmware, "rb") as fh:
+ signature = fh.read(16).lower()
+
+ if b"firmware" in signature:
+ self.is_bundle_cache = False
+ elif b"combined_content" in signature:
+ self.is_bundle_cache = True
+ else:
+ self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
+
+ return self.is_bundle_cache
+
+ def firmware_version(self):
+        """Retrieve the firmware version from the firmware file. Returns a byte string."""
+ if self.firmware_version_cache is None:
+
+ # Search firmware file for bundle or firmware version
+ with open(self.firmware, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if self.is_firmware_bundled():
+ if b'displayableAttributeList=' in line:
+ for item in line[25:].split(b','):
+ key, value = item.split(b"|")
+ if key == b'VERSION':
+ self.firmware_version_cache = value.strip(b"\n")
+ break
+ elif b"Version:" in line:
+ self.firmware_version_cache = line.split()[-1].strip(b"\n")
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
+ return self.firmware_version_cache
+
+ def nvsram_version(self):
+        """Retrieve the NVSRAM version from the NVSRAM file. Returns a byte string."""
+ if self.nvsram_version_cache is None:
+
+ with open(self.nvsram, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if b".NVSRAM Configuration Number" in line:
+ self.nvsram_version_cache = line.split(b'"')[-2]
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
+ return self.nvsram_version_cache
+
+ def check_system_health(self):
+        """Ensure the E-Series storage system is healthy. Works for both embedded and proxy web services."""
+ try:
+ rc, response = self.request("storage-systems/%s/health-check" % self.ssid, method="POST")
+ return response["successful"]
+ except Exception as error:
+ self.module.fail_json(msg="Health check failed! Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ def embedded_check_compatibility(self):
+        """Verify the files are compatible with the E-Series storage system."""
+ if self.nvsram:
+ self.embedded_check_nvsram_compatibility()
+ if self.firmware:
+ self.embedded_check_bundle_compatibility()
+
+ def embedded_check_nvsram_compatibility(self):
+        """Verify the provided NVSRAM is compatible with the E-Series storage system."""
+ files = [("nvsramimage", self.nvsram_name, self.nvsram)]
+ headers, data = create_multipart_formdata(files=files)
+ compatible = {}
+ try:
+ rc, compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid, method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ if not compatible["signatureTestingPassed"]:
+ self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
+ if not compatible["fileCompatible"]:
+ self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
+
+ # Determine whether nvsram upgrade is required
+ for module in compatible["versionContents"]:
+ if module["bundledVersion"] != module["onboardVersion"]:
+ self.upgrade_required = True
+
+ # Update bundle info
+ self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+ def embedded_check_bundle_compatibility(self):
+        """Verify the provided firmware bundle is compatible with the E-Series storage system."""
+ files = [("files[]", "blob", self.firmware)]
+ headers, data = create_multipart_formdata(files=files, send_8kb=True)
+ compatible = {}
+ try:
+ rc, compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid, method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ # Determine whether valid and compatible firmware
+ if not compatible["signatureTestingPassed"]:
+ self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
+ if not compatible["fileCompatible"]:
+ self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
+
+ # Determine whether bundle upgrade is required
+ for module in compatible["versionContents"]:
+ bundle_module_version = module["bundledVersion"].split(".")
+ onboard_module_version = module["onboardVersion"].split(".")
+ version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
+
+ if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
+ self.upgrade_required = True
+
+ # Build the modules information for logging purposes
+ self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+ def embedded_firmware_activate(self):
+ """Activate firmware."""
+ rc, response = self.request("firmware/embedded-firmware/activate", method="POST", ignore_errors=True, timeout=10)
+ if rc == "422":
+ self.module.fail_json(msg="Failed to activate the staged firmware. Array Id [%s]. Error [%s]" % (self.ssid, response))
+
+ def embedded_firmware_download(self):
+ """Execute the firmware download."""
+ if self.nvsram:
+ firmware_url = "firmware/embedded-firmware?nvsram=true&staged=true"
+ headers, data = create_multipart_formdata(files=[("nvsramfile", self.nvsram_name, self.nvsram),
+ ("dlpfile", self.firmware_name, self.firmware)])
+ else:
+ firmware_url = "firmware/embedded-firmware?nvsram=false&staged=true"
+ headers, data = create_multipart_formdata(files=[("dlpfile", self.firmware_name, self.firmware)])
+
+ # Stage firmware and nvsram
+ try:
+ rc, response = self.request(firmware_url, method="POST", data=data, headers=headers, timeout=(30 * 60))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to stage firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ # Activate firmware
+ activate_thread = threading.Thread(target=self.embedded_firmware_activate)
+ activate_thread.start()
+ self.wait_for_reboot()
+
+ def wait_for_reboot(self):
+        """Wait for controller A to fully reboot and for web services to be running."""
+ reboot_started = False
+ reboot_completed = False
+ self.module.log("Controller firmware: Reboot commencing. Array Id [%s]." % self.ssid)
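+        # A failed ping marks the reboot as started; a subsequent "ok" response marks it as completed.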
+ while self.wait_for_completion and not (reboot_started and reboot_completed):
+ try:
+ rc, response = self.request("storage-systems/%s/symbol/pingController?controller=a&verboseErrorResponse=true"
+ % self.ssid, method="POST", timeout=10, log_request=False)
+
+ if reboot_started and response == "ok":
+ self.module.log("Controller firmware: Reboot completed. Array Id [%s]." % self.ssid)
+ reboot_completed = True
+ sleep(2)
+ except Exception as error:
+ if not reboot_started:
+ self.module.log("Controller firmware: Reboot started. Array Id [%s]." % self.ssid)
+ reboot_started = True
+ continue
+
+ def firmware_event_logger(self):
+        """Log firmware download and activation events until activation completes."""
+ # Determine the last known event
+ try:
+ rc, events = self.request("storage-systems/%s/events" % self.ssid)
+ for event in events:
+ if int(event["eventNumber"]) > int(self.last_known_event):
+ self.last_known_event = event["eventNumber"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine last known event. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ while True:
+ try:
+ rc, events = self.request("storage-systems/%s/events?lastKnown=%s&wait=1" % (self.ssid, self.last_known_event), log_request=False)
+ for event in events:
+ if int(event["eventNumber"]) > int(self.last_known_event):
+ self.last_known_event = event["eventNumber"]
+
+ # Log firmware events
+ if event["eventType"] == "firmwareDownloadEvent":
+ self.module.log("%s" % event["status"])
+ if event["status"] == "informational" and event["statusMessage"]:
+ self.module.log("Controller firmware: %s Array Id [%s]." % (event["statusMessage"], self.ssid))
+
+ # When activation is successful, finish thread
+ if event["status"] == "activate_success":
+ self.module.log("Controller firmware activated. Array Id [%s]." % self.ssid)
+ return
+ except Exception as error:
+ pass
+
+ def wait_for_web_services(self):
+ """Wait for web services to report firmware and nvsram upgrade."""
+ # Wait for system to reflect changes
+ for count in range(int(self.REBOOT_TIMEOUT_SEC / 5)):
+ try:
+ if self.is_firmware_bundled():
+ firmware_rc, firmware_version = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/"
+ "codeVersions[codeModule='bundleDisplay']" % self.ssid, log_request=False)
+ current_firmware_version = six.b(firmware_version[0]["versionString"])
+ else:
+ firmware_rc, firmware_version = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion"
+ % self.ssid, log_request=False)
+ current_firmware_version = six.b(firmware_version[0])
+
+ nvsram_rc, nvsram_version = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid, log_request=False)
+ current_nvsram_version = six.b(nvsram_version[0])
+
+ if current_firmware_version == self.firmware_version() and (not self.nvsram or current_nvsram_version == self.nvsram_version()):
+ break
+ except Exception as error:
+ pass
+ sleep(5)
+ else:
+            self.module.fail_json(msg="Timeout waiting for SANtricity Web Services. Array [%s]" % self.ssid)
+
+ # Wait for system to be optimal
+ for count in range(int(self.REBOOT_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("storage-systems/%s" % self.ssid, log_request=False)
+
+ if response["status"] == "optimal":
+ self.upgrade_in_progress = False
+ break
+ except Exception as error:
+ pass
+ sleep(5)
+ else:
+ self.module.fail_json(msg="Timeout waiting for storage system to return to optimal status. Array [%s]" % self.ssid)
+
+ def embedded_upgrade(self):
+ """Upload and activate both firmware and NVSRAM."""
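+        # Stage and activate in one thread while a second thread tails the event log; join both before returning.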
+ download_thread = threading.Thread(target=self.embedded_firmware_download)
+ event_thread = threading.Thread(target=self.firmware_event_logger)
+ download_thread.start()
+ event_thread.start()
+ download_thread.join()
+ event_thread.join()
+
+        """Verify the NVSRAM is compatible with the E-Series storage system."""
+ """Verify nvsram is compatible with E-Series storage system."""
+ self.module.log("Checking nvsram compatibility...")
+ data = {"storageDeviceIds": [self.ssid]}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
+ except Exception as error:
+ if retries:
+ sleep(1)
+                return self.proxy_check_nvsram_compatibility(retries - 1)
+ else:
+ self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ for count in range(int(self.COMPATIBILITY_CHECK_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ except Exception as error:
+ continue
+
+ if not response["checkRunning"]:
+ for result in response["results"][0]["nvsramFiles"]:
+ if result["filename"] == self.nvsram_name:
+ return
+ self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid))
+ sleep(5)
+
+ self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]." % self.ssid)
+
+ def proxy_check_firmware_compatibility(self, retries=10):
+        """Verify the firmware is compatible with the E-Series storage system."""
+ check = {}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data={"storageDeviceIds": [self.ssid]})
+ except Exception as error:
+ if retries:
+ sleep(1)
+                return self.proxy_check_firmware_compatibility(retries - 1)
+ else:
+ self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ for count in range(int(self.COMPATIBILITY_CHECK_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ except Exception as error:
+ continue
+
+ if not response["checkRunning"]:
+ for result in response["results"][0]["cfwFiles"]:
+ if result["filename"] == self.firmware_name:
+ return
+ self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid))
+ sleep(5)
+
+ self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]." % self.ssid)
+
+ def proxy_upload_and_check_compatibility(self):
+ """Ensure firmware/nvsram file is uploaded and verify compatibility."""
+ uploaded_files = []
+ try:
+ rc, uploaded_files = self.request("firmware/cfw-files")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve uploaded firmware and nvsram files. Error [%s]" % to_native(error))
+
+ if self.firmware:
+ for uploaded_file in uploaded_files:
+ if uploaded_file["filename"] == self.firmware_name:
+ break
+ else:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", self.firmware_name, self.firmware)]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
+ % (self.firmware_name, self.ssid, to_native(error)))
+ self.proxy_check_firmware_compatibility()
+
+ if self.nvsram:
+ for uploaded_file in uploaded_files:
+ if uploaded_file["filename"] == self.nvsram_name:
+ break
+ else:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", self.nvsram_name, self.nvsram)]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
+ % (self.nvsram_name, self.ssid, to_native(error)))
+ self.proxy_check_nvsram_compatibility()
+
+ def proxy_check_upgrade_required(self):
+        """Determine whether the onboard firmware/NVSRAM version matches the file's version."""
+ # Verify controller consistency and get firmware versions
+ if self.firmware:
+ current_firmware_version = b""
+ try:
+ # Retrieve current bundle version
+ if self.is_firmware_bundled():
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
+ current_firmware_version = six.b(response[0]["versionString"])
+ else:
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
+ current_firmware_version = six.b(response[0])
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ # Determine whether the current firmware version is the same as the file
+ new_firmware_version = self.firmware_version()
+ if current_firmware_version != new_firmware_version:
+ self.upgrade_required = True
+
+ # Build the modules information for logging purposes
+ self.module_info.update({"bundleDisplay": {"onboard_version": current_firmware_version, "bundled_version": new_firmware_version}})
+
+ # Determine current NVSRAM version and whether change is required
+ if self.nvsram:
+ try:
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
+
+ if six.b(response[0]) != self.nvsram_version():
+ self.upgrade_required = True
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ def proxy_wait_for_upgrade(self):
+        """Wait for the SANtricity Web Services Proxy to report that the upgrade is complete."""
+ self.module.log("(Proxy) Waiting for upgrade to complete...")
+
+ status = {}
+ while True:
+ try:
+ rc, status = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, log_request=False, ignore_errors=True)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve firmware upgrade status! Array [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ if "errorMessage" in status:
+ self.module.warn("Proxy reported an error. Checking whether upgrade completed. Array [%s]. Error [%s]." % (self.ssid, status["errorMessage"]))
+ self.wait_for_web_services()
+ break
+
+ if not status["running"]:
+ if status["activationCompletionTime"]:
+ self.upgrade_in_progress = False
+ break
+ else:
+ self.module.fail_json(msg="Failed to complete upgrade. Array [%s]." % self.ssid)
+ sleep(5)
+
+ def delete_mel_events(self):
+ """Clear all mel-events."""
+ try:
+ rc, response = self.request("storage-systems/%s/mel-events?clearCache=true&resetMel=true" % self.ssid, method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to clear mel-events. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def proxy_upgrade(self):
+        """Activate previously uploaded firmware-related files."""
+ self.module.log("(Proxy) Firmware upgrade commencing...")
+ body = {"stageFirmware": False, "skipMelCheck": self.clear_mel_events, "cfwFile": self.firmware_name}
+ if self.nvsram:
+ body.update({"nvsramFile": self.nvsram_name})
+
+ try:
+ rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.upgrade_in_progress = True
+ if self.wait_for_completion:
+ self.proxy_wait_for_upgrade()
+
+ def apply(self):
+ """Upgrade controller firmware."""
+ if self.is_upgrade_in_progress():
+            self.module.fail_json(msg="Upgrade is already in progress. Array [%s]." % self.ssid)
+
+ if self.is_embedded():
+ self.embedded_check_compatibility()
+ else:
+ if not self.is_web_services_version_met(self.MINIMUM_PROXY_VERSION):
+                self.module.fail_json(msg="Minimum proxy version %s required!" % self.MINIMUM_PROXY_VERSION)
+ self.proxy_check_upgrade_required()
+
+ # This will upload the firmware files to the web services proxy but not to the controller
+ if self.upgrade_required:
+ self.proxy_upload_and_check_compatibility()
+
+ # Perform upgrade
+ if self.upgrade_required and not self.module.check_mode:
+
+ if self.clear_mel_events:
+ self.delete_mel_events()
+
+ if self.is_embedded():
+ self.embedded_upgrade()
+ else:
+ self.proxy_upgrade()
+
+ self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, modules_info=self.module_info)
+
+
+def main():
+ firmware = NetAppESeriesFirmware()
+ firmware.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py
new file mode 100644
index 000000000..030eb3b1f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py
@@ -0,0 +1,506 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: na_santricity_global
+short_description: NetApp E-Series manage global settings configuration
+description:
+ - Allow the user to configure several of the global settings associated with an E-Series storage-system
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ name:
+ description:
+ - Set the name of the E-Series storage-system
+ - This label/name doesn't have to be unique.
+ - May be up to 30 characters in length.
+ type: str
+ aliases:
+ - label
+ cache_block_size:
+ description:
+ - The cache block size.
+ - All volumes on the storage system share the same cache space; therefore, the volumes can have only one cache block size.
+ - See M(na_santricity_facts) for available sizes.
+ type: int
+ required: False
+ cache_flush_threshold:
+ description:
+ - This is the percentage threshold of the amount of unwritten data that is allowed to remain on the storage array's cache before flushing.
+ type: int
+ required: False
+ default_host_type:
+ description:
+ - Default host type for the storage system.
+ - Either one of the following names can be specified: Linux DM-MP, VMWare, Windows, Windows Clustered, or a
+ host type index which can be found in M(na_santricity_facts).
+ type: str
+ required: False
+ automatic_load_balancing:
+ description:
+ - Enable automatic load balancing to allow incoming traffic from the hosts to be dynamically managed and balanced across both controllers.
+ - Automatic load balancing requires host connectivity reporting to be enabled.
+ type: str
+ choices:
+ - enabled
+ - disabled
+ required: False
+ host_connectivity_reporting:
+ description:
+ - Enable host connectivity reporting to allow host connections to be monitored for connection and multipath driver problems.
+ - When I(automatic_load_balancing==enabled), I(host_connectivity_reporting) must also be enabled.
+ type: str
+ choices:
+ - enabled
+ - disabled
+ required: False
+ login_banner_message:
+ description:
+ - Text message that appears prior to the login page.
+ - I(login_banner_message=="") will delete any existing banner message.
+ type: str
+ required: False
+ controller_shelf_id:
+ description:
+ - This is the identifier for the drive enclosure containing the controllers.
+ type: int
+ required: false
+ default: 0
+notes:
+ - Check mode is supported.
+ - This module requires Web Services API v1.3 or newer.
+"""
+
+EXAMPLES = """
+ - name: Set the storage-system name
+ na_santricity_global:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myArrayName
+ cache_block_size: 32768
+ cache_flush_threshold: 80
+ automatic_load_balancing: enabled
+ default_host_type: Linux DM-MP
+ - name: Set the storage-system global settings using a host type index
+ na_santricity_global:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myOtherArrayName
+ cache_block_size: 8192
+ cache_flush_threshold: 60
+ automatic_load_balancing: disabled
+ default_host_type: 28
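+ # Illustrative example (hypothetical values, not from the original module docs): set a login
+ # banner message and relocate the controller shelf identifier.
+ - name: Set a login banner message and the controller shelf identifier
+ na_santricity_global:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ login_banner_message: "Authorized access only!"
+ controller_shelf_id: 99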
+"""
+
+RETURN = """
+changed:
+ description: Whether global settings were changed
+ returned: on success
+ type: bool
+ sample: true
+array_name:
+ description: Current storage array's name
+ returned: on success
+ type: str
+ sample: arrayName
+automatic_load_balancing:
+ description: Whether automatic load balancing feature has been enabled
+ returned: on success
+ type: str
+ sample: enabled
+host_connectivity_reporting:
+ description: Whether host connectivity reporting feature has been enabled
+ returned: on success
+ type: str
+ sample: enabled
+cache_settings:
+ description: Current cache block size and flushing threshold values
+ returned: on success
+ type: dict
+ sample: {"cache_block_size": 32768, "cache_flush_threshold": 80}
+default_host_type_index:
+ description: Current default host type index
+ returned: on success
+ type: int
+ sample: 28
+login_banner_message:
+ description: Current banner message
+ returned: on success
+ type: str
+ sample: "Banner message here!"
+controller_shelf_id:
+ description: Identifier for the drive enclosure containing the controllers.
+ returned: on success
+ type: int
+ sample: 99
+"""
+import random
+import sys
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils import six
+from ansible.module_utils._text import to_native
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+
+class NetAppESeriesGlobalSettings(NetAppESeriesModule):
+ MAXIMUM_LOGIN_BANNER_SIZE_BYTES = 5 * 1024
+ LAST_AVAILABLE_CONTROLLER_SHELF_ID = 99
+
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(cache_block_size=dict(type="int", require=False),
+ cache_flush_threshold=dict(type="int", required=False),
+ default_host_type=dict(type="str", require=False),
+ automatic_load_balancing=dict(type="str", choices=["enabled", "disabled"], required=False),
+ host_connectivity_reporting=dict(type="str", choices=["enabled", "disabled"], required=False),
+ name=dict(type='str', required=False, aliases=['label']),
+ login_banner_message=dict(type='str', required=False),
+ controller_shelf_id=dict(type="int", required=False, default=0))
+
+ super(NetAppESeriesGlobalSettings, self).__init__(ansible_options=ansible_options,
+ web_services_version=version,
+ supports_check_mode=True)
+ args = self.module.params
+ self.name = args["name"]
+ self.cache_block_size = args["cache_block_size"]
+ self.cache_flush_threshold = args["cache_flush_threshold"]
+ self.host_type_index = args["default_host_type"]
+ self.controller_shelf_id = args["controller_shelf_id"]
+
+ self.login_banner_message = None
+ if args["login_banner_message"] is not None:
+ self.login_banner_message = args["login_banner_message"].rstrip("\n")
+
+ self.autoload_enabled = None
+ if args["automatic_load_balancing"]:
+ self.autoload_enabled = args["automatic_load_balancing"] == "enabled"
+
+ self.host_connectivity_reporting_enabled = None
+ if args["host_connectivity_reporting"]:
+ self.host_connectivity_reporting_enabled = args["host_connectivity_reporting"] == "enabled"
+ elif self.autoload_enabled:
+ self.host_connectivity_reporting_enabled = True
+
+ if self.autoload_enabled and not self.host_connectivity_reporting_enabled:
+ self.module.fail_json(msg="Option automatic_load_balancing requires host_connectivity_reporting to be enabled. Array [%s]." % self.ssid)
+
+ self.current_configuration_cache = None
+
+ def get_current_configuration(self, update=False):
+ """Retrieve the current storage array's global configuration."""
+ if self.current_configuration_cache is None or update:
+ self.current_configuration_cache = dict()
+
+ # Get the storage array's capabilities and available options
+ try:
+ rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+ self.current_configuration_cache["autoload_capable"] = "capabilityAutoLoadBalancing" in capabilities["productCapabilities"]
+ self.current_configuration_cache["cache_block_size_options"] = capabilities["featureParameters"]["cacheBlockSizes"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array capabilities. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid)
+ self.current_configuration_cache["host_type_options"] = dict()
+ for host_type in host_types:
+ self.current_configuration_cache["host_type_options"].update({host_type["code"].lower(): host_type["index"]})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array host options. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Get the current cache settings
+ try:
+ rc, settings = self.request("storage-systems/%s/graph/xpath-filter?query=/sa" % self.ssid)
+ self.current_configuration_cache["cache_settings"] = {"cache_block_size": settings[0]["cache"]["cacheBlkSize"],
+ "cache_flush_threshold": settings[0]["cache"]["demandFlushThreshold"]}
+ self.current_configuration_cache["default_host_type_index"] = settings[0]["defaultHostTypeIndex"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve cache settings. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, array_info = self.request("storage-systems/%s" % self.ssid)
+ self.current_configuration_cache["autoload_enabled"] = array_info["autoLoadBalancingEnabled"]
+ self.current_configuration_cache["host_connectivity_reporting_enabled"] = array_info["hostConnectivityReportingEnabled"]
+ self.current_configuration_cache["name"] = array_info['name']
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine current configuration. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, login_banner_message = self.request("storage-systems/%s/login-banner?asFile=false" % self.ssid, ignore_errors=True, json_response=False,
+ headers={"Accept": "application/octet-stream", "netapp-client-type": "Ansible-%s" % ansible_version})
+ self.current_configuration_cache["login_banner_message"] = login_banner_message.decode("utf-8").rstrip("\n")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine current login banner message. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, hardware_inventory = self.request("storage-systems/%s/hardware-inventory" % self.ssid)
+ self.current_configuration_cache["controller_shelf_reference"] = hardware_inventory["trays"][0]["trayRef"]
+ self.current_configuration_cache["controller_shelf_id"] = hardware_inventory["trays"][0]["trayId"]
+ self.current_configuration_cache["used_shelf_ids"] = [tray["trayId"] for tray in hardware_inventory["trays"]]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve controller shelf identifier. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return self.current_configuration_cache
+
+ def change_cache_block_size_required(self):
+ """Determine whether cache block size change is required."""
+ if self.cache_block_size is None:
+ return False
+
+ current_configuration = self.get_current_configuration()
+ current_available_block_sizes = current_configuration["cache_block_size_options"]
+ if self.cache_block_size not in current_available_block_sizes:
+ self.module.fail_json(msg="Invalid cache block size. Array [%s]. Available cache block sizes [%s]." % (self.ssid, current_available_block_sizes))
+
+ return self.cache_block_size != current_configuration["cache_settings"]["cache_block_size"]
+
+ def change_cache_flush_threshold_required(self):
+ """Determine whether cache flush percentage change is required."""
+ if self.cache_flush_threshold is None:
+ return False
+
+ current_configuration = self.get_current_configuration()
+ if self.cache_flush_threshold <= 0 or self.cache_flush_threshold >= 100:
+ self.module.fail_json(msg="Invalid cache flushing threshold, it must be equal to or between 0 and 100. Array [%s]" % self.ssid)
+
+ return self.cache_flush_threshold != current_configuration["cache_settings"]["cache_flush_threshold"]
+
+ def change_host_type_required(self):
+ """Determine whether default host type change is required."""
+ if self.host_type_index is None:
+ return False
+
+ current_configuration = self.get_current_configuration()
+ current_available_host_types = current_configuration["host_type_options"]
+ if isinstance(self.host_type_index, str):
+ self.host_type_index = self.host_type_index.lower()
+
+ if self.host_type_index in self.HOST_TYPE_INDEXES.keys():
+ self.host_type_index = self.HOST_TYPE_INDEXES[self.host_type_index]
+ elif self.host_type_index in current_available_host_types.keys():
+ self.host_type_index = current_available_host_types[self.host_type_index]
+
+ if self.host_type_index not in current_available_host_types.values():
+ self.module.fail_json(msg="Invalid host type index! Array [%s]. Available host options [%s]." % (self.ssid, current_available_host_types))
+
+ return int(self.host_type_index) != current_configuration["default_host_type_index"]
+
+ def change_autoload_enabled_required(self):
+ """Determine whether automatic load balancing state change is required."""
+ if self.autoload_enabled is None:
+ return False
+
+ change_required = False
+ current_configuration = self.get_current_configuration()
+ if self.autoload_enabled and not current_configuration["autoload_capable"]:
+ self.module.fail_json(msg="Automatic load balancing is not available. Array [%s]." % self.ssid)
+
+ if self.autoload_enabled:
+ if not current_configuration["autoload_enabled"] or not current_configuration["host_connectivity_reporting_enabled"]:
+ change_required = True
+ elif current_configuration["autoload_enabled"]:
+ change_required = True
+
+ return change_required
+
+ def change_host_connectivity_reporting_enabled_required(self):
+ """Determine whether host connectivity reporting state change is required."""
+ if self.host_connectivity_reporting_enabled is None:
+ return False
+
+ current_configuration = self.get_current_configuration()
+ return self.host_connectivity_reporting_enabled != current_configuration["host_connectivity_reporting_enabled"]
+
+ def change_name_required(self):
+ """Determine whether storage array name change is required."""
+ if self.name is None:
+ return False
+
+ current_configuration = self.get_current_configuration()
+ if self.name and len(self.name) > 30:
+ self.module.fail_json(msg="The provided name is invalid, it must be less than or equal to 30 characters in length. Array [%s]" % self.ssid)
+
+ return self.name != current_configuration["name"]
+
+ def change_login_banner_message_required(self):
+ """Determine whether storage array name change is required."""
+ if self.login_banner_message is None:
+ return False
+
+ current_configuration = self.get_current_configuration()
+ if self.login_banner_message and sys.getsizeof(self.login_banner_message) > self.MAXIMUM_LOGIN_BANNER_SIZE_BYTES:
+ self.module.fail_json(msg="The banner message is too long! It must be %s bytes. Array [%s]" % (self.MAXIMUM_LOGIN_BANNER_SIZE_BYTES, self.ssid))
+ return self.login_banner_message != current_configuration["login_banner_message"]
+
+ def change_controller_shelf_id_required(self):
+ """Determine whether storage array tray identifier change is required."""
+ current_configuration = self.get_current_configuration()
+ if self.controller_shelf_id is not None and self.controller_shelf_id != current_configuration["controller_shelf_id"]:
+
+ if self.controller_shelf_id in current_configuration["used_shelf_ids"]:
+ self.module.fail_json(msg="The controller_shelf_id is currently being used by another shelf. Used Identifiers: [%s]. Array [%s]." % (", ".join([str(id) for id in self.get_current_configuration()["used_shelf_ids"]]), self.ssid))
+
+ if self.controller_shelf_id < 0 or self.controller_shelf_id > self.LAST_AVAILABLE_CONTROLLER_SHELF_ID:
+ self.module.fail_json(msg="The controller_shelf_id must be 0-99 and not already used by another shelf. Used Identifiers: [%s]. Array [%s]." % (", ".join([str(id) for id in self.get_current_configuration()["used_shelf_ids"]]), self.ssid))
+
+ return True
+ return False
+
+ def update_cache_settings(self):
+ """Update cache block size and/or flushing threshold."""
+ current_configuration = self.get_current_configuration()
+ block_size = self.cache_block_size if self.cache_block_size else current_configuration["cache_settings"]["cache_block_size"]
+ threshold = self.cache_flush_threshold if self.cache_flush_threshold else current_configuration["cache_settings"]["cache_flush_threshold"]
+ try:
+ rc, cache_settings = self.request("storage-systems/%s/symbol/setSACacheParams?verboseErrorResponse=true" % self.ssid, method="POST",
+ data={"cacheBlkSize": block_size, "demandFlushAmount": threshold, "demandFlushThreshold": threshold})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set cache settings. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update_host_type(self):
+ """Update default host type."""
+ try:
+ rc, default_host_type = self.request("storage-systems/%s/symbol/setStorageArrayProperties?verboseErrorResponse=true" % self.ssid, method="POST",
+ data={"settings": {"defaultHostTypeIndex": self.host_type_index}})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set default host type. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ def update_autoload(self):
+ """Update automatic load balancing state."""
+ current_configuration = self.get_current_configuration()
+ if self.autoload_enabled and not current_configuration["host_connectivity_reporting_enabled"]:
+ try:
+ rc, host_connectivity_reporting = self.request("storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true" % self.ssid,
+ method="POST", data={"enableHostConnectivityReporting": self.autoload_enabled})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to enable host connectivity reporting which is needed for automatic load balancing state."
+ " Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, autoload = self.request("storage-systems/%s/symbol/setAutoLoadBalancing?verboseErrorResponse=true" % self.ssid,
+ method="POST", data={"enableAutoLoadBalancing": self.autoload_enabled})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set automatic load balancing state. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update_host_connectivity_reporting_enabled(self):
+ """Update automatic load balancing state."""
+ try:
+ rc, host_connectivity_reporting = self.request("storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true" % self.ssid,
+ method="POST", data={"enableHostConnectivityReporting": self.host_connectivity_reporting_enabled})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to enable host connectivity reporting. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update_name(self):
+ """Update storage array's name."""
+ try:
+ rc, result = self.request("storage-systems/%s/configuration" % self.ssid, method="POST", data={"name": self.name})
+ except Exception as err:
+ self.module.fail_json(msg="Failed to set the storage array name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update_login_banner_message(self):
+ """Update storage login banner message."""
+ if self.login_banner_message:
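+ # The banner text is uploaded as a multipart/form-data file part named "banner.txt"; the
+ # payload is assembled by hand so the same boundary can be used in both the body and the
+ # Content-Type header, for Python 2 and Python 3 alike.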
+ boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
+ data_parts = list()
+ data = None
+
+ if six.PY2: # Generate payload for Python 2
+ newline = "\r\n"
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="file"; filename="banner.txt"',
+ "Content-Type: text/plain",
+ "",
+ self.login_banner_message])
+ data_parts.extend(["--%s--" % boundary, ""])
+ data = newline.join(data_parts)
+
+ else:
+ newline = six.b("\r\n")
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="file"; filename="banner.txt"'),
+ six.b("Content-Type: text/plain"),
+ six.b(""),
+ six.b(self.login_banner_message)])
+ data_parts.extend([six.b("--%s--" % boundary), b""])
+ data = newline.join(data_parts)
+
+ headers = {"Content-Type": "multipart/form-data; boundary=%s" % boundary, "Content-Length": str(len(data))}
+
+ try:
+ rc, result = self.request("storage-systems/%s/login-banner" % self.ssid, method="POST", headers=headers, data=data)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to set the storage system login banner message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ else:
+ try:
+ rc, result = self.request("storage-systems/%s/login-banner" % self.ssid, method="DELETE")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to clear the storage system login banner message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update_controller_shelf_id(self):
+ """Update controller shelf tray identifier."""
+ current_configuration = self.get_current_configuration()
+ try:
+ rc, tray = self.request("storage-systems/%s/symbol/updateTray?verboseErrorResponse=true" % self.ssid, method="POST",
+ data={"ref": current_configuration["controller_shelf_reference"], "trayID": self.controller_shelf_id})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update controller shelf identifier. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update(self):
+ """Ensure the storage array's global setting are correctly set."""
+ change_required = False
+ if (self.change_autoload_enabled_required() or self.change_cache_block_size_required() or self.change_cache_flush_threshold_required() or
+ self.change_host_type_required() or self.change_name_required() or self.change_host_connectivity_reporting_enabled_required() or
+ self.change_login_banner_message_required() or self.change_controller_shelf_id_required()):
+ change_required = True
+
+ if change_required and not self.module.check_mode:
+ if self.change_autoload_enabled_required():
+ self.update_autoload()
+ if self.change_host_connectivity_reporting_enabled_required():
+ self.update_host_connectivity_reporting_enabled()
+ if self.change_cache_block_size_required() or self.change_cache_flush_threshold_required():
+ self.update_cache_settings()
+ if self.change_host_type_required():
+ self.update_host_type()
+ if self.change_name_required():
+ self.update_name()
+ if self.change_login_banner_message_required():
+ self.update_login_banner_message()
+ if self.change_controller_shelf_id_required():
+ self.update_controller_shelf_id()
+
+ current_configuration = self.get_current_configuration(update=True)
+ self.module.exit_json(changed=change_required,
+ cache_settings=current_configuration["cache_settings"],
+ default_host_type_index=current_configuration["default_host_type_index"],
+ automatic_load_balancing="enabled" if current_configuration["autoload_enabled"] else "disabled",
+ host_connectivity_reporting="enabled" if current_configuration["host_connectivity_reporting_enabled"] else "disabled",
+ array_name=current_configuration["name"],
+ login_banner_message=current_configuration["login_banner_message"],
+ controller_shelf_id=current_configuration["controller_shelf_id"])
+
+
+def main():
+ global_settings = NetAppESeriesGlobalSettings()
+ global_settings.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py
new file mode 100644
index 000000000..0da00fcd0
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_host
+short_description: NetApp E-Series manage hosts
+description: Create, update, remove hosts on NetApp E-series storage arrays
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ name:
+ description:
+ - If the host doesn't yet exist, the label/name to assign at creation time.
+ - If the host already exists, this will be used to uniquely identify the host and make any required changes.
+ type: str
+ required: True
+ aliases:
+ - label
+ state:
+ description:
+ - Set to absent to remove an existing host
+ - Set to present to modify or create a new host definition
+ type: str
+ choices:
+ - absent
+ - present
+ default: present
+ host_type:
+ description:
+ - Host type includes operating system and multipath considerations.
+ - If not specified, the default host type will be utilized. Default host type can be set using M(netapp_eseries.santricity.na_santricity_global).
+ - For storage array specific options see M(netapp_eseries.santricity.na_santricity_facts).
+ - All values are case-insensitive.
+ - AIX MPIO - The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ - AVT 4M - Silicon Graphics, Inc. (SGI) proprietary multipath driver
+ - HP-UX - The HP-UX OS with native multipath driver
+ - Linux ATTO - The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ - Linux DM-MP - The Linux OS and the native DM-MP driver
+ - Linux Pathmanager - The Linux OS and the SGI proprietary multipath driver
+ - Mac - The Mac OS and the ATTO Technology, Inc. driver
+ - ONTAP - FlexArray
+ - Solaris 11 or later - The Solaris 11 or later OS and the native MPxIO driver
+ - Solaris 10 or earlier - The Solaris 10 or earlier OS and the native MPxIO driver
+ - SVC - IBM SAN Volume Controller
+ - VMware - ESXi OS
+ - Windows - Windows Server OS and Windows MPIO with a DSM driver
+ - Windows Clustered - Clustered Windows Server OS and Windows MPIO with a DSM driver
+ - Windows ATTO - Windows OS and the ATTO Technology, Inc. driver
+ type: str
+ required: False
+ aliases:
+ - host_type_index
+ ports:
+ description:
+ - A list of host ports you wish to associate with the host.
+ - Host ports are uniquely identified by their WWN or IQN. Each port's assignment to a particular host is
+ identified by a label, and these labels must be unique.
+ type: list
+ required: False
+ suboptions:
+ type:
+ description:
+ - The interface type of the port to define.
+ - Acceptable choices depend on the capabilities of the target hardware/software platform.
+ required: true
+ choices:
+ - iscsi
+ - sas
+ - fc
+ - ib
+ - nvmeof
+ label:
+ description:
+ - A unique label to assign to this port assignment.
+ required: true
+ port:
+ description:
+ - The WWN or IQN of the hostPort to assign to this port definition.
+ required: true
+ force_port:
+ description:
+ - Allow ports that are already assigned to be re-assigned to your current host
+ required: false
+ type: bool
+"""
+
+EXAMPLES = """
+ - name: Define or update an existing host named "Host1"
+ na_santricity_host:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: "Host1"
+ state: present
+ host_type_index: Linux DM-MP
+ ports:
+ - type: "iscsi"
+ label: "PORT_1"
+ port: "iqn.1996-04.de.suse:01:56f86f9bd1fe"
+ - type: "fc"
+ label: "FC_1"
+ port: "10:00:FF:7C:FF:FF:FF:01"
+ - type: "fc"
+ label: "FC_2"
+ port: "10:00:FF:7C:FF:FF:FF:00"
+
+ - name: Ensure a host named "Host2" doesn"t exist
+ na_santricity_host:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: "Host2"
+ state: absent
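+ # Illustrative example (hypothetical host name and port values): take over a port that is
+ # already assigned to another host by setting force_port.
+ - name: Reassign an iSCSI port to "Host3" using force_port
+ na_santricity_host:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: "Host3"
+ state: present
+ host_type_index: Linux DM-MP
+ force_port: true
+ ports:
+ - type: "iscsi"
+ label: "PORT_1"
+ port: "iqn.1996-04.de.suse:01:56f86f9bd1fe"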
+"""
+
+RETURN = """
+msg:
+ description:
+ - A user-readable description of the actions performed.
+ returned: on success
+ type: str
+ sample: The host has been created.
+id:
+ description:
+ - the unique identifier of the host on the E-Series storage-system
+ returned: on success when state=present
+ type: str
+ sample: 00000000600A098000AAC0C3003004700AD86A52
+ssid:
+ description:
+ - the unique identifier of the E-Series storage-system with the current API
+ returned: on success
+ type: str
+ sample: 1
+api_url:
+ description:
+ - the URL of the API that processed this request
+ returned: on success
+ type: str
+ sample: https://webservices.example.com:8443
+"""
+import re
+
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
+class NetAppESeriesHost(NetAppESeriesModule):
+ PORT_TYPES = ["iscsi", "sas", "fc", "ib", "nvmeof"]
+
+ def __init__(self):
+ ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present"]),
+ ports=dict(type="list", required=False),
+ force_port=dict(type="bool", default=False),
+ name=dict(type="str", required=True, aliases=["label"]),
+ host_type=dict(type="str", required=False, aliases=["host_type_index"]))
+
+ super(NetAppESeriesHost, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ self.check_mode = self.module.check_mode
+ args = self.module.params
+ self.ports = args["ports"]
+ self.force_port = args["force_port"]
+ self.name = args["name"]
+ self.state = args["state"]
+
+ self.post_body = dict()
+ self.all_hosts = list()
+ self.host_obj = dict()
+ self.new_ports = list()
+ self.ports_for_update = list()
+ self.ports_for_removal = list()
+
+ # Update host type with the corresponding index
+ host_type = args["host_type"]
+ if host_type:
+ host_type = host_type.lower()
+ if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
+ self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
+ elif host_type.isdigit():
+ self.host_type_index = int(args["host_type"])
+ else:
+ self.module.fail_json(msg="host_type must be either a host type name or host type index found integer the documentation.")
+ else:
+ self.host_type_index = None
+
+ if not self.url.endswith("/"):
+ self.url += "/"
+
+ # Fix port representation if they are provided with colons
+ if self.ports is not None:
+ for port in self.ports:
+ port["type"] = port["type"].lower()
+ port["port"] = port["port"].lower()
+
+ if port["type"] not in self.PORT_TYPES:
+ self.module.fail_json(msg="Invalid port type! Port interface type must be one of [%s]." % ", ".join(self.PORT_TYPES))
+
+ # Determine whether address is 16-byte WWPN and, if so, remove
+ if re.match(r"^(0x)?[0-9a-f]{16}$", port["port"].replace(":", "")):
+ port["port"] = port["port"].replace(":", '').replace("0x", "")
+
+ if port["type"] == "ib":
+ port["port"] = "0" * (32 - len(port["port"])) + port["port"]
+
+ @property
+ def default_host_type(self):
+ """Return the default host type index."""
+ try:
+ rc, default_index = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/defaultHostTypeIndex" % self.ssid)
+ return default_index[0]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve default host type index")
+
+ @property
+ def valid_host_type(self):
+ host_types = None
+ try:
+ rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ try:
+ match = list(filter(lambda host_type: host_type["index"] == self.host_type_index, host_types))[0]
+ return True
+ except IndexError:
+ self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
+
+ def check_port_types(self):
+ """Check to see whether the port interface types are available on storage system."""
+ try:
+ rc, interfaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
+
+ for port in self.ports:
+ for interface in interfaces:
+
+ # Check for IB iSER
+ if port["type"] == "ib" and "iqn" in port["port"]:
+ if ((interface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
+ interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and
+ interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or
+ (interface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
+ interface["ioInterfaceTypeData"]["ib"]["isISERSupported"])):
+ port["type"] = "iscsi"
+ break
+ # Check for NVMe
+ elif (port["type"] == "nvmeof" and "commandProtocolPropertiesList" in interface and
+ "commandProtocolProperties" in interface["commandProtocolPropertiesList"] and
+ interface["commandProtocolPropertiesList"]["commandProtocolProperties"]):
+ if interface["commandProtocolPropertiesList"]["commandProtocolProperties"][0]["commandProtocol"] == "nvme":
+ break
+ # Check SAS, FC, iSCSI
+ elif ((port["type"] == "fc" and interface["ioInterfaceTypeData"]["interfaceType"] == "fibre") or
+ (port["type"] == interface["ioInterfaceTypeData"]["interfaceType"])):
+ break
+ else:
+ # self.module.fail_json(msg="Invalid port type! Type [%s]. Port [%s]." % (port["type"], port["label"]))
+ self.module.warn("Port type not found in hostside interfaces! Type [%s]. Port [%s]." % (port["type"], port["label"]))
+ except Exception as error:
+ # For older versions of web services
+ for port in self.ports:
+ if port["type"] == "ib" and "iqn" in port["port"]:
+ port["type"] = "iscsi"
+ break
+
+ def assigned_host_ports(self, apply_unassigning=False):
+ """Determine if the hostPorts requested have already been assigned and return list of required used ports."""
+ used_host_ports = {}
+ for host in self.all_hosts:
+ if host["label"].lower() != self.name.lower():
+ for host_port in host["hostSidePorts"]:
+
+ # Compare expected ports with those from other hosts definitions.
+ for port in self.ports:
+ if port["port"] == host_port["address"] or port["label"].lower() == host_port["label"].lower():
+ if not self.force_port:
+ self.module.fail_json(msg="Port label or address is already used and force_port option is set to false!")
+ else:
+ # Determine port reference
+ port_ref = [port["hostPortRef"] for port in host["ports"]
+ if port["hostPortName"] == host_port["address"]]
+ port_ref.extend([port["initiatorRef"] for port in host["initiators"]
+ if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
+
+ # Create dictionary of hosts containing list of port references
+ if host["hostRef"] not in used_host_ports.keys():
+ used_host_ports.update({host["hostRef"]: port_ref})
+ else:
+ used_host_ports[host["hostRef"]].extend(port_ref)
+
+ # Unassign assigned ports
+ if apply_unassigning:
+ for host_ref in used_host_ports.keys():
+ try:
+ rc, resp = self.request("storage-systems/%s/hosts/%s" % (self.ssid, host_ref), method="POST",
+ data={"portsToRemove": used_host_ports[host_ref]})
+ except Exception as err:
+ self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]. Error [%s]."
+ % (self.host_obj["id"], self.ssid, used_host_ports[host_ref], to_native(err)))
+
+ @property
+ def host_exists(self):
+ """Determine if the requested host exists
+ As a side effect, set the full list of defined hosts in "all_hosts", and the target host in "host_obj".
+ """
+ match = False
+ all_hosts = list()
+
+ try:
+ rc, all_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ # Augment the host objects
+ for host in all_hosts:
+ for port in host["hostSidePorts"]:
+ port["type"] = port["type"].lower()
+ port["address"] = port["address"].lower()
+
+ # Augment hostSidePorts with their ID (this is an omission in the API)
+ ports = dict((port["label"], port["id"]) for port in host["ports"])
+ ports.update(dict((port["label"], port["id"]) for port in host["initiators"]))
+
+ for host_side_port in host["hostSidePorts"]:
+ if host_side_port["label"] in ports:
+ host_side_port["id"] = ports[host_side_port["label"]]
+
+ if host["label"].lower() == self.name.lower():
+ self.host_obj = host
+ match = True
+
+ self.all_hosts = all_hosts
+ return match
+
+ @property
+ def needs_update(self):
+ """Determine whether we need to update the Host object
+ As a side effect, we will set the ports that we need to update (ports_for_update), and the ports we need to add
+ (new_ports), on self.
+ """
+ changed = False
+ if self.host_obj["hostTypeIndex"] != self.host_type_index:
+ changed = True
+
+ current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
+ for port in self.host_obj["hostSidePorts"])
+
+ if self.ports:
+ for port in self.ports:
+ for current_host_port_id in current_host_ports.keys():
+ if port == current_host_ports[current_host_port_id]:
+ current_host_ports.pop(current_host_port_id)
+ break
+
+ elif port["port"] == current_host_ports[current_host_port_id]["port"]:
+ if self.port_on_diff_host(port) and not self.force_port:
+ self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
+ " Specify force_port as True or try a different port spec" % port)
+
+ if (port["label"] != current_host_ports[current_host_port_id]["label"] or
+ port["type"] != current_host_ports[current_host_port_id]["type"]):
+ current_host_ports.pop(current_host_port_id)
+ self.ports_for_update.append({"portRef": current_host_port_id, "port": port["port"],
+ "label": port["label"], "hostRef": self.host_obj["hostRef"]})
+ break
+ else:
+ self.new_ports.append(port)
+
+ self.ports_for_removal = list(current_host_ports.keys())
+ changed = any([self.new_ports, self.ports_for_update, self.ports_for_removal, changed])
+ return changed
+
+ def port_on_diff_host(self, arg_port):
+ """ Checks to see if a passed in port arg is present on a different host"""
+ for host in self.all_hosts:
+
+ # Only check "other" hosts
+ if host["name"].lower() != self.name.lower():
+ for port in host["hostSidePorts"]:
+
+ # Check if the port label is found in the port dict list of each host
+ if arg_port["label"].lower() == port["label"].lower() or arg_port["port"].lower() == port["address"].lower():
+ return True
+ return False
+
+ def update_host(self):
+ self.post_body = {"name": self.name, "hostType": {"index": self.host_type_index}}
+
+ # Remove ports that need reassigning from their current host.
+ if self.ports:
+ self.assigned_host_ports(apply_unassigning=True)
+ self.post_body["portsToUpdate"] = self.ports_for_update
+ self.post_body["portsToRemove"] = self.ports_for_removal
+ self.post_body["ports"] = self.new_ports
+
+ if not self.check_mode:
+ try:
+ rc, self.host_obj = self.request("storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj["id"]), method="POST",
+ data=self.post_body, ignore_errors=True)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ self.module.exit_json(changed=True)
+
+ def create_host(self):
+ # Remove ports that need reassigning from their current host.
+ self.assigned_host_ports(apply_unassigning=True)
+
+ # needs_reassignment = False
+ post_body = dict(name=self.name,
+ hostType=dict(index=self.host_type_index))
+
+ if self.ports:
+ post_body.update(ports=self.ports)
+
+ if not self.host_exists:
+ if not self.check_mode:
+ try:
+ rc, self.host_obj = self.request("storage-systems/%s/hosts" % self.ssid, method="POST", data=post_body, ignore_errors=True)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ else:
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload)
+
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=True, msg="Host created.")
+
+ def remove_host(self):
+ try:
+ rc, resp = self.request("storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj["id"]), method="DELETE")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj["id"], self.ssid, to_native(err)))
+
+ def build_success_payload(self, host=None):
+ keys = [] # ["id"]
+
+ if host:
+ result = dict((key, host[key]) for key in keys)
+ else:
+ result = dict()
+ result["ssid"] = self.ssid
+ result["api_url"] = self.url
+ return result
+
+ def apply(self):
+ if self.state == "present":
+ if self.host_type_index is None:
+ self.host_type_index = self.default_host_type
+
+ self.check_port_types()
+ if self.host_exists:
+ if self.needs_update and self.valid_host_type:
+ self.update_host()
+ else:
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload)
+ elif self.valid_host_type:
+ self.create_host()
+ else:
+ payload = self.build_success_payload()
+ if self.host_exists:
+ self.remove_host()
+ self.module.exit_json(changed=True, msg="Host removed.", **payload)
+ else:
+ self.module.exit_json(changed=False, msg="Host already absent.", **payload)
+
+
+def main():
+ host = NetAppESeriesHost()
+ host.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py
new file mode 100644
index 000000000..7b8a9e2aa
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_hostgroup
+short_description: NetApp E-Series manage array host groups
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+description: Create, update or destroy host groups on a NetApp E-Series storage array.
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Whether the specified host group should exist or not.
+ type: str
+ choices: ["present", "absent"]
+ default: present
+ name:
+ description:
+ - Name of the host group to manage
+ type: str
+ required: true
+ hosts:
+ description:
+ - List of host names/labels to add to the group
+ type: list
+ required: false
+"""
+EXAMPLES = """
+ - name: Configure Hostgroup
+ na_santricity_hostgroup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ name: example_hostgroup
+ hosts:
+ - host01
+ - host02
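+ # Illustrative example (assumed group name): remove an existing host group; its hosts are
+ # unassigned from the group but not deleted.
+ - name: Remove host group
+ na_santricity_hostgroup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+ name: example_hostgroup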
+"""
+RETURN = """
+clusterRef:
+ description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
+ will alter the volume access rights of other clusters, in addition to this one.
+ returned: always
+ type: bool
+ sample: false
+hosts:
+ description: A list of the hosts that are part of the host group after all operations.
+ returned: always except when state is absent
+ type: list
+ sample: ["HostA","HostB"]
+id:
+ description: The id number of the hostgroup
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
+ indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ returned: always except when state is absent
+ type: bool
+ sample: false
+label:
+ description: The user-assigned, descriptive label string for the cluster.
+ returned: always
+ type: str
+ sample: "MyHostGroup"
+name:
+ description: same as label
+ returned: always except when state is absent
+ type: str
+ sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+ description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent
+ type: bool
+ sample: true
+"""
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
+class NetAppESeriesHostGroup(NetAppESeriesModule):
+ EXPANSION_TIMEOUT_SEC = 10
+ DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(
+ state=dict(choices=["present", "absent"], type="str", default="present"),
+ name=dict(required=True, type="str"),
+ hosts=dict(required=False, type="list"))
+ super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
+ web_services_version=version,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.name = args["name"]
+ self.hosts_list = args["hosts"]
+
+ self.current_host_group = None
+ self.hosts_cache = None
+
+ @property
+ def hosts(self):
+ """Retrieve a list of host reference identifiers should be associated with the host group."""
+ if self.hosts_cache is None:
+ self.hosts_cache = []
+ existing_hosts = []
+
+ if self.hosts_list:
+ try:
+ rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ for host in self.hosts_list:
+ for existing_host in existing_hosts:
+ if host in existing_host["id"] or host.lower() in existing_host["name"].lower():
+ self.hosts_cache.append(existing_host["id"])
+ break
+ else:
+ self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]." % (self.ssid, host))
+ self.hosts_cache.sort()
+ return self.hosts_cache
+
+ @property
+ def host_groups(self):
+ """Retrieve a list of existing host groups."""
+ host_groups = []
+ hosts = []
+ try:
+ rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
+ rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
+ for group in host_groups:
+ hosts_ids = []
+ for host in hosts:
+ if group["id"] == host["clusterRef"]:
+ hosts_ids.append(host["hostRef"])
+ group.update({"hosts": hosts_ids})
+
+ return host_groups
+
+ @property
+ def current_hosts_in_host_group(self):
+ """Retrieve the current hosts associated with the current hostgroup."""
+ current_hosts = []
+ for group in self.host_groups:
+ if group["name"] == self.name:
+ current_hosts = group["hosts"]
+ break
+
+ return current_hosts
+
+ def unassign_hosts(self, host_list=None):
+ """Unassign hosts from host group."""
+ if host_list is None:
+ host_list = self.current_host_group["hosts"]
+
+ for host_id in host_list:
+ try:
+ rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
+ method="POST", data={"group": "0000000000000000000000000000000000000000"})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
+ " Error[%s]." % (self.ssid, host_id, to_native(error)))
+
+ def delete_host_group(self, unassign_hosts=True):
+ """Delete host group"""
+ if unassign_hosts:
+ self.unassign_hosts()
+
+ try:
+ rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ def create_host_group(self):
+ """Create host group."""
+ data = {"name": self.name, "hosts": self.hosts}
+
+ response = None
+ try:
+ rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ return response
+
+ def update_host_group(self):
+ """Update host group."""
+ data = {"name": self.name, "hosts": self.hosts}
+
+ # unassign hosts that should not be part of the hostgroup
+ desired_host_ids = self.hosts
+ for host in self.current_hosts_in_host_group:
+ if host not in desired_host_ids:
+ self.unassign_hosts([host])
+
+ update_response = None
+ try:
+ rc, update_response = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), method="POST", data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ return update_response
+
+ def apply(self):
+ """Apply desired host group state to the storage array."""
+ changes_required = False
+
+ # Search for existing host group match
+ for group in self.host_groups:
+ if group["name"] == self.name:
+ self.current_host_group = group
+ self.current_host_group["hosts"].sort()
+ break
+
+ # Determine whether changes are required
+ if self.state == "present":
+ if self.current_host_group:
+ if self.hosts and self.hosts != self.current_host_group["hosts"]:
+ changes_required = True
+ else:
+ if not self.name:
+ self.module.fail_json(msg="The option name must be supplied when creating a new host group. Array id [%s]." % self.ssid)
+ changes_required = True
+
+ elif self.current_host_group:
+ changes_required = True
+
+ # Apply any necessary changes
+ msg = ""
+ if changes_required and not self.module.check_mode:
+ msg = "No changes required."
+ if self.state == "present":
+ if self.current_host_group:
+ if self.hosts != self.current_host_group["hosts"]:
+ msg = self.update_host_group()
+ else:
+ msg = self.create_host_group()
+
+ elif self.current_host_group:
+ self.delete_host_group()
+ msg = "Host group deleted. Array Id [%s]. Host group [%s]." % (self.ssid, self.current_host_group["name"])
+
+ self.module.exit_json(msg=msg, changed=changes_required)
+
+
+def main():
+ hostgroup = NetAppESeriesHostGroup()
+ hostgroup.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py
new file mode 100644
index 000000000..364bef73f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_ib_iser_interface
+short_description: NetApp E-Series manage InfiniBand iSER interface configuration
+description:
+ - Configure settings of an E-Series InfiniBand iSER interface IPv4 address configuration.
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A, the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard limitation and could change in the future.
+ type: str
+ required: true
+ choices:
+ - A
+ - B
+ channel:
+ description:
+ - The InfiniBand HCA port you wish to modify.
+ - Ports are numbered from left to right, starting with 1.
+ type: int
+ required: true
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ type: str
+ required: true
+notes:
+ - Check mode is supported.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ na_santricity_ib_iser_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ controller: "A"
+ channel: "1"
+ address: "192.168.1.100"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+enabled:
+ description:
+ - Indicates whether IPv4 connectivity has been enabled or disabled.
+ - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
+ it is unlikely that the configuration will actually be valid.
+ returned: on success
+ sample: True
+ type: bool
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesIbIserInterface(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(controller=dict(type="str", required=True, choices=["A", "B"]),
+ channel=dict(type="int"),
+ address=dict(type="str", required=True))
+
+ super(NetAppESeriesIbIserInterface, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.controller = args["controller"]
+ self.channel = args["channel"]
+ self.address = args["address"]
+ self.check_mode = self.module.check_mode
+
+ self.get_target_interface_cache = None
+
+ # A relatively primitive regex to validate that the input is formatted like a valid ip address
+ address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+ if self.address and not address_regex.match(self.address):
+ self.module.fail_json(msg="An invalid ip address was provided for address.")
+
+ def get_interfaces(self):
+ """Retrieve and filter all hostside interfaces for IB iSER."""
+ ifaces = []
+ try:
+ rc, ifaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ # Filter out non-ib-iser interfaces
+ ib_iser_ifaces = []
+ for iface in ifaces:
+ if ((iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
+ iface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and
+ iface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or
+ (iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
+ iface["ioInterfaceTypeData"]["ib"]["isISERSupported"])):
+ ib_iser_ifaces.append(iface)
+
+ if not ib_iser_ifaces:
+ self.module.fail_json(msg="Failed to detect any InfiniBand iSER interfaces! Array [%s] - %s." % self.ssid)
+
+ return ib_iser_ifaces
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their references
+ {
+ 'A': '070000000000000000000001',
+ 'B': '070000000000000000000002',
+ }
+ :return: the controllers defined on the system
+ """
+ controllers = list()
+ try:
+ rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ controllers.sort()
+
+ controllers_dict = {}
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[label] = controller
+ i += 1
+
+ return controllers_dict
+
+ def get_ib_link_status(self):
+ """Determine the infiniband link status. Returns dictionary keyed by interface reference number."""
+ link_statuses = {}
+ try:
+ rc, result = self.request("storage-systems/%s/hardware-inventory" % self.ssid)
+ for link in result["ibPorts"]:
+ link_statuses.update({link["channelPortRef"]: link["linkState"]})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve ib link status information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return link_statuses
+
+ def get_target_interface(self):
+ """Search for the selected IB iSER interface"""
+ if self.get_target_interface_cache is None:
+ ifaces = self.get_interfaces()
+ ifaces_status = self.get_ib_link_status()
+ controller_id = self.get_controllers()[self.controller]
+
+ controller_ifaces = []
+ for iface in ifaces:
+ if iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and iface["controllerRef"] == controller_id:
+ controller_ifaces.append([iface["ioInterfaceTypeData"]["iscsi"]["channel"], iface,
+ ifaces_status[iface["ioInterfaceTypeData"]["iscsi"]["channelPortRef"]]])
+ elif iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and iface["controllerRef"] == controller_id:
+ controller_ifaces.append([iface["ioInterfaceTypeData"]["ib"]["channel"], iface,
+ iface["ioInterfaceTypeData"]["ib"]["linkState"]])
+
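+ # Sort by channel number so the user-facing, one-based channel option maps directly onto the
+ # list index (channel 1 -> index 0).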
+ sorted_controller_ifaces = sorted(controller_ifaces)
+ if self.channel < 1 or self.channel > len(controller_ifaces):
+ status_msg = ", ".join(["%s (link %s)" % (index + 1, values[2])
+ for index, values in enumerate(sorted_controller_ifaces)])
+ self.module.fail_json(msg="Invalid controller %s HCA channel. Available channels: %s, Array Id [%s]."
+ % (self.controller, status_msg, self.ssid))
+
+ self.get_target_interface_cache = sorted_controller_ifaces[self.channel - 1][1]
+ return self.get_target_interface_cache
+
+ def is_change_required(self):
+ """Determine whether change is required."""
+ changed_required = False
+ iface = self.get_target_interface()
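+ # The iSER port may be reported either as an iscsi-type interface or as an ib-type interface with
+ # iSER SCSI protocol properties; the configured IPv4 address is checked in either representation.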
+ if (iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
+ iface["ioInterfaceTypeData"]["iscsi"]["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address):
+ changed_required = True
+
+ elif iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and iface["ioInterfaceTypeData"]["ib"]["isISERSupported"]:
+ for properties in iface["commandProtocolPropertiesList"]["commandProtocolProperties"]:
+ if (properties["commandProtocol"] == "scsi" and
+ properties["scsiProperties"]["scsiProtocolType"] == "iser" and
+ properties["scsiProperties"]["iserProperties"]["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address):
+ changed_required = True
+
+ return changed_required
+
+ def make_request_body(self):
+ iface = self.get_target_interface()
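+ # Only ipv4Address is populated below; the remaining empty lists appear to leave the corresponding
+ # settings unchanged in the SYMbol setIscsiInterfaceProperties request.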
+ body = {"iscsiInterface": iface["ioInterfaceTypeData"][iface["ioInterfaceTypeData"]["interfaceType"]]["id"],
+ "settings": {"tcpListenPort": [],
+ "ipv4Address": [self.address],
+ "ipv4SubnetMask": [],
+ "ipv4GatewayAddress": [],
+ "ipv4AddressConfigMethod": [],
+ "maximumFramePayloadSize": [],
+ "ipv4VlanId": [],
+ "ipv4OutboundPacketPriority": [],
+ "ipv4Enabled": [],
+ "ipv6Enabled": [],
+ "ipv6LocalAddresses": [],
+ "ipv6RoutableAddresses": [],
+ "ipv6PortRouterAddress": [],
+ "ipv6AddressConfigMethod": [],
+ "ipv6OutboundPacketPriority": [],
+ "ipv6VlanId": [],
+ "ipv6HopLimit": [],
+ "ipv6NdReachableTime": [],
+ "ipv6NdRetransmitTime": [],
+ "ipv6NdStaleTimeout": [],
+ "ipv6DuplicateAddressDetectionAttempts": [],
+ "maximumInterfaceSpeed": []}}
+ return body
+
+ def update(self):
+ """Make any necessary updates."""
+ update_required = self.is_change_required()
+ if update_required and not self.check_mode:
+ try:
+ rc, result = self.request("storage-systems/%s/symbol/setIscsiInterfaceProperties"
+ % self.ssid, method="POST", data=self.make_request_body())
+ except Exception as error:
+ self.module.fail_json(msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+ self.module.exit_json(msg="The interface settings have been updated.", changed=update_required)
+
+ self.module.exit_json(msg="No changes were required.", changed=update_required)
+
+
+def main():
+ ib_iser = NetAppESeriesIbIserInterface()
+ ib_iser.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py
new file mode 100644
index 000000000..e85e8b68c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_iscsi_interface
+short_description: NetApp E-Series manage iSCSI interface configuration
+description:
+ - Configure settings of an E-Series iSCSI interface
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ type: str
+ required: true
+ choices:
+ - A
+ - B
+ port:
+ description:
+ - The controller iSCSI baseboard or HIC port to modify.
+ - Determine the port by counting, starting from one, the controller's iSCSI ports left to right. Count the
+ baseboard and then the HIC ports.
+ type: int
+ required: true
+ state:
+ description:
+ - When enabled, the provided configuration will be utilized.
+ - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
+ type: str
+ choices:
+ - enabled
+ - disabled
+ default: enabled
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ config_method:
+ description:
+ - The configuration method type to use for this interface.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ type: str
+ choices:
+ - dhcp
+ - static
+ default: dhcp
+ required: false
+ mtu:
+ description:
+ - The maximum transmission units (MTU), in bytes.
+ - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+ (any value > 1500).
+ - Generally, it is necessary for your host, switches, and other components not only to support jumbo
+ frames, but also to have them configured properly. Therefore, unless you know what you're doing, it's best to
+ leave this at the default.
+ type: int
+ default: 1500
+ required: false
+ aliases:
+ - max_frame_size
+ speed:
+ description:
+ - This option changes the interface port speed.
+ - Only supported speeds will be accepted and must be in the form [0-9]+[gm] (e.g. 25g)
+ - 'Down' interfaces will report 'Unknown' speed until they are set to an accepted network speed.
+ - Do not use this option when the port's speed is automatically configured as it will fail. See System
+ Manager for the port's capability.
+ type: str
+ required: false
+notes:
+ - Check mode is supported.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc.) can take seconds or minutes longer to take effect.
+ - This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ na_santricity_iscsi_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+ speed: "25g"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ na_santricity_iscsi_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "2"
+ controller: "B"
+ state: disabled
+
+ - name: Enable jumbo frames for the first 4 ports on controller A
+ na_santricity_iscsi_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "{{ item }}"
+ controller: "A"
+ state: enabled
+ mtu: 9000
+ config_method: dhcp
+ loop:
+ - 1
+ - 2
+ - 3
+ - 4
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+def strip_interface_speed(speed):
+ """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
+ if isinstance(speed, list):
+ result = [re.match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
+ # Mark speeds that do not match the expected pattern as "unknown" instead of silently dropping them.
+ result = [sp.group().replace("speed", "") if sp else "unknown" for sp in result]
+ result = ["auto" if re.match(r"auto", sp) else sp for sp in result]
+ else:
+ result = re.match(r"speed[0-9]{1,3}[gm]", speed)
+ result = result.group().replace("speed", "") if result else "unknown"
+ result = "auto" if re.match(r"auto", result.lower()) else result
+ return result
+
+
+class NetAppESeriesIscsiInterface(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(controller=dict(type="str", required=True, choices=["A", "B"]),
+ port=dict(type="int", required=True),
+ state=dict(type="str", required=False, default="enabled", choices=["enabled", "disabled"]),
+ address=dict(type="str", required=False),
+ subnet_mask=dict(type="str", required=False),
+ gateway=dict(type="str", required=False),
+ config_method=dict(type="str", required=False, default="dhcp", choices=["dhcp", "static"]),
+ mtu=dict(type="int", default=1500, required=False, aliases=["max_frame_size"]),
+ speed=dict(type="str", required=False))
+
+ required_if = [["config_method", "static", ["address", "subnet_mask"]]]
+ super(NetAppESeriesIscsiInterface, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ required_if=required_if,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.controller = args["controller"]
+ self.port = args["port"]
+ self.mtu = args["mtu"]
+ self.state = args["state"]
+ self.address = args["address"]
+ self.subnet_mask = args["subnet_mask"]
+ self.gateway = args["gateway"]
+ self.config_method = args["config_method"]
+ self.speed = args["speed"]
+
+ self.check_mode = self.module.check_mode
+ self.post_body = dict()
+ self.controllers = list()
+ self.get_target_interface_cache = None
+
+ if self.mtu < 1500 or self.mtu > 9000:
+ self.module.fail_json(msg="The provided mtu is invalid, it must be > 1500 and < 9000 bytes.")
+
+ if self.config_method == "dhcp" and any([self.address, self.subnet_mask, self.gateway]):
+ self.module.fail_json(msg="A config_method of dhcp is mutually exclusive with the address,"
+ " subnet_mask, and gateway options.")
+
+ # A relatively primitive regex to validate that the input is formatted like a valid ip address
+ address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+
+ if self.address and not address_regex.match(self.address):
+ self.module.fail_json(msg="An invalid ip address was provided for address.")
+
+ if self.subnet_mask and not address_regex.match(self.subnet_mask):
+ self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
+
+ if self.gateway and not address_regex.match(self.gateway):
+ self.module.fail_json(msg="An invalid ip address was provided for gateway.")
+
+ self.get_host_board_id_cache = None
+
+ @property
+ def interfaces(self):
+ ifaces = list()
+ try:
+ rc, ifaces = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ # Filter out non-iSCSI interfaces
+ iscsi_interfaces = []
+ for iface in [iface for iface in ifaces if iface["interfaceType"] == "iscsi"]:
+ if iface["iscsi"]["interfaceData"]["type"] == "ethernet":
+ iscsi_interfaces.append(iface)
+
+ return iscsi_interfaces
+
+ def get_host_board_id(self, iface_ref):
+ if self.get_host_board_id_cache is None:
+ try:
+ rc, iface_board_map_list = self.request("storage-systems/%s/graph/xpath-filter?query=/ioInterfaceHicMap" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve IO interface HIC mappings! Array Id [%s]."
+ " Error [%s]." % (self.ssid, to_native(err)))
+
+ self.get_host_board_id_cache = dict()
+ for iface_board_map in iface_board_map_list:
+ self.get_host_board_id_cache.update({iface_board_map["interfaceRef"]: iface_board_map["hostBoardRef"]})
+
+ return self.get_host_board_id_cache[iface_ref]
+
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their references
+ {
+ "A": "070000000000000000000001",
+ "B": "070000000000000000000002",
+ }
+ :return: the controllers defined on the system
+ """
+ controllers = list()
+ try:
+ rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ controllers.sort()
+
+ controllers_dict = {}
+ i = ord("A")
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[label] = controller
+ i += 1
+
+ return controllers_dict
+
+ def get_target_interface(self):
+ """Retrieve the specific controller iSCSI interface."""
+ if self.get_target_interface_cache is None:
+ ifaces = self.interfaces
+
+ controller_ifaces = []
+ for iface in ifaces:
+ if self.controllers[self.controller] == iface["iscsi"]["controllerId"]:
+ controller_ifaces.append([iface["iscsi"]["channel"], iface, iface["iscsi"]["interfaceData"]["ethernetData"]["linkStatus"]])
+
+ sorted_controller_ifaces = sorted(controller_ifaces)
+ if self.port < 1 or self.port > len(controller_ifaces):
+ status_msg = ", ".join(["%s (link %s)" % (index + 1, values[2]) for index, values in enumerate(sorted_controller_ifaces)])
+ self.module.fail_json(msg="Invalid controller %s iSCSI port. Available ports: %s, Array Id [%s]."
+ % (self.controller, status_msg, self.ssid))
+
+ self.get_target_interface_cache = sorted_controller_ifaces[self.port - 1][1]
+ return self.get_target_interface_cache
+
+ def make_update_body(self, target_iface):
+ target_iface = target_iface["iscsi"]
+ body = dict(iscsiInterface=target_iface["id"])
+ update_required = False
+
+ if self.state == "enabled":
+ settings = dict()
+ if not target_iface["ipv4Enabled"]:
+ update_required = True
+ settings["ipv4Enabled"] = [True]
+ if self.mtu != target_iface["interfaceData"]["ethernetData"]["maximumFramePayloadSize"]:
+ update_required = True
+ settings["maximumFramePayloadSize"] = [self.mtu]
+ if self.config_method == "static":
+ ipv4Data = target_iface["ipv4Data"]["ipv4AddressData"]
+
+ if ipv4Data["ipv4Address"] != self.address:
+ update_required = True
+ settings["ipv4Address"] = [self.address]
+ if ipv4Data["ipv4SubnetMask"] != self.subnet_mask:
+ update_required = True
+ settings["ipv4SubnetMask"] = [self.subnet_mask]
+ if self.gateway is not None and ipv4Data["ipv4GatewayAddress"] != self.gateway:
+ update_required = True
+ settings["ipv4GatewayAddress"] = [self.gateway]
+
+ if target_iface["ipv4Data"]["ipv4AddressConfigMethod"] != "configStatic":
+ update_required = True
+ settings["ipv4AddressConfigMethod"] = ["configStatic"]
+
+ elif target_iface["ipv4Data"]["ipv4AddressConfigMethod"] != "configDhcp":
+ update_required = True
+ settings.update(dict(ipv4Enabled=[True],
+ ipv4AddressConfigMethod=["configDhcp"]))
+ body["settings"] = settings
+
+ else:
+ if target_iface["ipv4Enabled"]:
+ update_required = True
+ body["settings"] = dict(ipv4Enabled=[False])
+
+ return update_required, body
+
+ def make_update_speed_body(self, target_iface):
+ target_iface = target_iface["iscsi"]
+
+ # Check whether HIC speed should be changed.
+ if self.speed is None:
+ return False, dict()
+ else:
+ if target_iface["interfaceData"]["ethernetData"]["autoconfigSupport"]:
+ self.module.warn("This interface's HIC speed is autoconfigured!")
+ return False, dict()
+ if self.speed == strip_interface_speed(target_iface["interfaceData"]["ethernetData"]["currentInterfaceSpeed"]):
+ return False, dict()
+
+ # Create a dictionary containing supported HIC speeds keyed by simplified value to the complete value (ie. {"10g": "speed10gig"})
+ supported_speeds = dict()
+ for supported_speed in target_iface["interfaceData"]["ethernetData"]["supportedInterfaceSpeeds"]:
+ supported_speeds.update({strip_interface_speed(supported_speed): supported_speed})
+
+ if self.speed not in supported_speeds:
+ self.module.fail_json(msg="The host interface card (HIC) does not support the provided speed. Array Id [%s]. Supported speeds [%s]" % (self.ssid, ", ".join(supported_speeds.keys())))
+
+ body = {"settings": {"maximumInterfaceSpeed": [supported_speeds[self.speed]]}, "portsRef": {}}
+ hic_ref = self.get_host_board_id(target_iface["id"])
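+ # An all-zero host board reference indicates the port resides on the controller baseboard rather than on a HIC.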
+ if hic_ref == "0000000000000000000000000000000000000000":
+ body.update({"portsRef": {"portRefType": "baseBoard", "baseBoardRef": target_iface["id"], "hicRef": ""}})
+ else:
+ body.update({"portsRef":{"portRefType": "hic", "hicRef": hic_ref, "baseBoardRef": ""}})
+
+ return True, body
+
+ def update(self):
+ self.controllers = self.get_controllers()
+ if self.controller not in self.controllers:
+ self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s." % ", ".join(self.controllers.keys()))
+
+ iface_before = self.get_target_interface()
+ update_required, body = self.make_update_body(iface_before)
+ if update_required and not self.check_mode:
+ try:
+ rc, result = self.request("storage-systems/%s/symbol/setIscsiInterfaceProperties" % self.ssid, method="POST", data=body, ignore_errors=True)
+ # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
+ # is cancelled mid-flight), that it isn't worth the complexity.
+ if rc == 422 and result["retcode"] in ["busy", "3"]:
+ self.module.fail_json(msg="The interface is currently busy (probably processing a previously requested modification request)."
+ " This operation cannot currently be completed. Array Id [%s]. Error [%s]." % (self.ssid, result))
+ # Handle authentication issues, etc.
+ elif rc != 200:
+ self.module.fail_json(msg="Failed to modify the interface! Array Id [%s]. Error [%s]." % (self.ssid, to_native(result)))
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ update_speed_required, speed_body = self.make_update_speed_body(iface_before)
+ if update_speed_required and not self.check_mode:
+ try:
+ rc, result = self.request("storage-systems/%s/symbol/setHostPortsAttributes?verboseErrorResponse=true" % self.ssid, method="POST", data=speed_body)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update host interface card speed. Array Id [%s], Body [%s]. Error [%s]." % (self.ssid, speed_body, to_native(err)))
+
+ if update_required or update_speed_required:
+ self.module.exit_json(msg="The interface settings have been updated.", changed=True)
+ self.module.exit_json(msg="No changes were required.", changed=False)
+
+
+def main():
+ iface = NetAppESeriesIscsiInterface()
+ iface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py
new file mode 100644
index 000000000..869c2d58e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_iscsi_target
+short_description: NetApp E-Series manage iSCSI target configuration
+description:
+ - Configure the settings of an E-Series iSCSI target
+author:
+ - Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ name:
+ description:
+ - The name/alias to assign to the iSCSI target.
+ - This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
+ type: str
+ required: false
+ aliases:
+ - alias
+ ping:
+ description:
+ - Enable ICMP ping responses from the configured iSCSI ports.
+ type: bool
+ default: true
+ required: false
+ chap_secret:
+ description:
+ - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
+ - When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
+ whether or not the password has changed.
+ - The chap secret may only use ascii characters with values between 32 and 126 decimal.
+ - The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
+ - The chap secret is cleared when not specified or an empty string.
+ type: str
+ required: false
+ aliases:
+ - chap
+ - password
+ unnamed_discovery:
+ description:
+ - When an initiator starts a discovery session against a target port, it is considered an unnamed
+ discovery session if the iSCSI target iqn is not specified in the request.
+ - This option may be disabled to increase security if desired.
+ type: bool
+ default: true
+ required: false
+notes:
+ - Check mode is supported.
+ - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
+ M(na_santricity_iscsi_interface).
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
+ na_santricity_iscsi_target:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myTarget
+ ping: true
+ unnamed_discovery: true
+
+ - name: Set the target alias and the CHAP secret
+ na_santricity_iscsi_target:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myTarget
+ chap: password1234
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The iSCSI target settings have been updated.
+alias:
+ description:
+ - The alias assigned to the iSCSI target.
+ returned: on success
+ sample: myArray
+ type: str
+iqn:
+ description:
+ - The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
+ returned: on success
+ sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
+ type: str
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class NetAppESeriesIscsiTarget(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(name=dict(type="str", required=False, aliases=["alias"]),
+ ping=dict(type="bool", required=False, default=True),
+ chap_secret=dict(type="str", required=False, aliases=["chap", "password"], no_log=True),
+ unnamed_discovery=dict(type="bool", required=False, default=True))
+
+ super(NetAppESeriesIscsiTarget, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+
+ self.name = args["name"]
+ self.ping = args["ping"]
+ self.chap_secret = args["chap_secret"]
+ self.unnamed_discovery = args["unnamed_discovery"]
+
+ self.check_mode = self.module.check_mode
+ self.post_body = dict()
+ self.controllers = list()
+
+ if self.chap_secret:
+ if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
+ self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
+ " characters in length.")
+
+ for c in self.chap_secret:
+ ordinal = ord(c)
+ if ordinal < 32 or ordinal > 126:
+ self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
+ " characters with decimal values between 32 and 126.")
+
+ @property
+ def target(self):
+ """Provide information on the iSCSI Target configuration
+
+ Sample:
+ {
+ "alias": "myCustomName",
+ "ping": True,
+ "unnamed_discovery": True,
+ "chap": False,
+ "iqn": "iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45",
+ }
+ """
+ target = dict()
+ try:
+ rc, data = self.request("storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target" % self.ssid)
+ # This likely isn"t an iSCSI-enabled system
+ if not data:
+ self.module.fail_json(msg="This storage-system does not appear to have iSCSI interfaces. Array Id [%s]." % self.ssid)
+
+ data = data[0]
+ chap = any([auth for auth in data["configuredAuthMethods"]["authMethodData"] if auth["authMethod"] == "chap"])
+ target.update(dict(alias=data["alias"]["iscsiAlias"], iqn=data["nodeName"]["iscsiNodeName"], chap=chap))
+
+ rc, data = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData" % self.ssid)
+
+ data = data[0]
+ target.update(dict(ping=data["icmpPingResponseEnabled"], unnamed_discovery=data["unnamedDiscoverySessionsEnabled"]))
+
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ return target
+
+ def apply_iscsi_settings(self):
+ """Update the iSCSI target alias and CHAP settings"""
+ update = False
+ target = self.target
+
+ body = dict()
+
+ if self.name is not None and self.name != target["alias"]:
+ update = True
+ body["alias"] = self.name
+
+ # If the CHAP secret was provided, we trigger an update.
+ if self.chap_secret:
+ update = True
+ body.update(dict(enableChapAuthentication=True,
+ chapSecret=self.chap_secret))
+ # If no secret was provided, then we disable chap
+ elif target["chap"]:
+ update = True
+ body.update(dict(enableChapAuthentication=False))
+
+ if update and not self.check_mode:
+ try:
+ self.request("storage-systems/%s/iscsi/target-settings" % self.ssid, method="POST", data=body)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ return update
+
+ def apply_target_changes(self):
+ update = False
+ target = self.target
+
+ body = dict()
+
+ if self.ping != target["ping"]:
+ update = True
+ body["icmpPingResponseEnabled"] = self.ping
+
+ if self.unnamed_discovery != target["unnamed_discovery"]:
+ update = True
+ body["unnamedDiscoverySessionsEnabled"] = self.unnamed_discovery
+
+ if update and not self.check_mode:
+ try:
+ self.request("storage-systems/%s/iscsi/entity" % self.ssid, method="POST", data=body)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ return update
+
+ def update(self):
+ update = self.apply_iscsi_settings()
+ update = self.apply_target_changes() or update
+
+ target = self.target
+ data = dict((key, target[key]) for key in target if key in ["iqn", "alias"])
+
+ self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)
+
+
+def main():
+ iface = NetAppESeriesIscsiTarget()
+ iface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
new file mode 100644
index 000000000..18f2b622f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+ - Configure an E-Series system to allow authentication via an LDAP server
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - When I(state=="present") the defined LDAP domain will be added to the storage system.
+ - When I(state=="absent") the domain specified will be removed from the storage system.
+ - I(state=="disabled") will result in deleting all existing LDAP domains on the storage system.
+ type: str
+ choices:
+ - present
+ - absent
+ - disabled
+ default: present
+ identifier:
+ description:
+ - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+ type: str
+ default: "default"
+ required: false
+ bind_user:
+ description:
+ - This is the user account that will be used for querying the LDAP server.
+ - Required when I(bind_password) is specified.
+ - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+ type: str
+ required: false
+ bind_password:
+ description:
+ - This is the password for the bind user account.
+ - Required when I(bind_user) is specified.
+ type: str
+ required: false
+ server_url:
+ description:
+ - This is the LDAP server url.
+ - The connection string should be specified as using the ldap or ldaps protocol along with the port information.
+ type: str
+ required: false
+ names:
+ description:
+ - The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
+ - Defaults to the DNS name of the I(server_url).
+ - The only requirement is that the name[s] be resolvable.
+ - "Example: user@example.com"
+ type: list
+ required: false
+ search_base:
+ description:
+ - The search base is used to find group memberships of the user.
+ - "Example: ou=users,dc=example,dc=com"
+ type: str
+ required: false
+ role_mappings:
+ description:
+ - This is where you specify which groups should have access to what permissions for the
+ storage-system.
+ - For example, all users in group A will be assigned all 4 available roles, which will allow access
+ to all the management functionality of the system (super-user). Those in group B only have the
+ storage.monitor role, which will allow only read-only access.
+ - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+ - The roles that will be assigned to the group/groups matching the provided regex.
+ - storage.admin allows users full read/write access to storage objects and operations.
+ - storage.monitor allows users read-only access to storage objects and operations.
+ - support.admin allows users access to hardware, diagnostic information, the Major Event
+ Log, and other critical support-related functionality, but not the storage configuration.
+ - security.admin allows users access to authentication/authorization configuration, as well
+ as the audit log configuration, and certification management.
+ type: dict
+ required: false
+ group_attributes:
+ description:
+ - The user attributes that should be considered for the group to role mapping.
+ - Typically this is used with something like "memberOf", and a user's access is tested against group
+ membership or lack thereof.
+ type: list
+ default: ["memberOf"]
+ required: false
+ user_attribute:
+ description:
+ - This is the attribute we will use to match the provided username when a user attempts to
+ authenticate.
+ type: str
+ default: "sAMAccountName"
+ required: false
+notes:
+ - Check mode is supported
+ - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+ authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+ different (or no), access to certain aspects of the system and API.
+ - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+ - Generally, you"ll need to get the details of your organization"s LDAP server before you"ll be able to configure
+ the system for using LDAP authentication; every implementation is likely to be very different.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+ v3.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Disable LDAP authentication
+ na_santricity_ldap:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: disabled
+
+ - name: Remove the "default" LDAP domain configuration
+ na_santricity_ldap:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+ identifier: default
+
+ - name: Define a new LDAP domain, utilizing defaults where possible
+ na_santricity_ldap:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ bind_user: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+ bind_password: "mySecretPass"
+ server_url: "ldap://example.com:389"
+ search_base: "OU=Users,DC=example,DC=com"
+ role_mappings:
+ ".*dist-dev-storage.*":
+ - storage.admin
+ - security.admin
+ - support.admin
+ - storage.monitor
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The ldap settings have been updated.
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+
+class NetAppESeriesLdap(NetAppESeriesModule):
+ NO_CHANGE_MSG = "No changes were necessary."
+ TEMPORARY_DOMAIN = "ANSIBLE_TMP_DOMAIN"
+
+ def __init__(self):
+ ansible_options = dict(state=dict(type="str", required=False, default="present", choices=["present", "absent", "disabled"]),
+ identifier=dict(type="str", required=False, default="default"),
+ bind_user=dict(type="str", required=False),
+ bind_password=dict(type="str", required=False, no_log=True),
+ names=dict(type="list", required=False),
+ server_url=dict(type="str", required=False),
+ search_base=dict(type="str", required=False),
+ role_mappings=dict(type="dict", required=False, no_log=True),
+ group_attributes=dict(type="list", default=["memberOf"], required=False),
+ user_attribute=dict(type="str", required=False, default="sAMAccountName"))
+
+ required_if = [["state", "present", ["server_url"]]]
+ required_together = [["bind_user", "bind_password"]]
+ super(NetAppESeriesLdap, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ required_if=required_if,
+ required_together=required_together,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.id = args["identifier"]
+ self.bind_user = args["bind_user"]
+ self.bind_password = args["bind_password"]
+ self.names = args["names"]
+ self.server = args["server_url"]
+ self.search_base = args["search_base"]
+ self.role_mappings = args["role_mappings"]
+ self.group_attributes = args["group_attributes"]
+ self.user_attribute = args["user_attribute"]
+
+ if self.server and not self.names:
+ parts = urlparse.urlparse(self.server)
+ self.names = [parts.netloc.split(':')[0]]
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if self.is_embedded():
+ self.url_path_prefix = "storage-systems/1/"
+ elif self.ssid != "0" and self.ssid.lower() != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/storage-systems/1/" % self.ssid
+
+ self.existing_domain_ids = []
+ self.domain = {} # Existing LDAP domain
+ self.body = {} # Request body
+
+ def get_domains(self):
+ """Retrieve all domain information from storage system."""
+ domains = None
+ try:
+ rc, response = self.request(self.url_path_prefix + "ldap")
+ domains = response["ldapDomains"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve current LDAP configuration. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return domains
+
+ def build_request_body(self):
+ """Build the request body."""
+ self.body.update({"id": self.id, "groupAttributes": self.group_attributes, "ldapUrl": self.server, "names": self.names, "roleMapCollection": []})
+
+ if self.search_base:
+ self.body.update({"searchBase": self.search_base})
+ if self.user_attribute:
+ self.body.update({"userAttribute": self.user_attribute})
+ if self.bind_user and self.bind_password:
+ self.body.update({"bindLookupUser": {"password": self.bind_password, "user": self.bind_user}})
+ if self.role_mappings:
+ for regex, names in self.role_mappings.items():
+ for name in names:
+ self.body["roleMapCollection"].append({"groupRegex": regex, "ignorecase": True, "name": name})
+
+ def are_changes_required(self):
+ """Determine whether any changes are required and build request body."""
+ change_required = False
+ domains = self.get_domains()
+
+ if self.state == "disabled" and domains:
+ self.existing_domain_ids = [domain["id"] for domain in domains]
+ change_required = True
+
+ elif self.state == "present":
+ for domain in domains:
+ if self.id == domain["id"]:
+ self.domain = domain
+
+ if self.state == "absent":
+ change_required = True
+ elif (len(self.group_attributes) != len(domain["groupAttributes"]) or
+ any([a not in domain["groupAttributes"] for a in self.group_attributes])):
+ change_required = True
+ elif self.user_attribute != domain["userAttribute"]:
+ change_required = True
+ elif self.search_base.lower() != domain["searchBase"].lower():
+ change_required = True
+ elif self.server != domain["ldapUrl"]:
+ change_required = True
+ elif any(name not in domain["names"] for name in self.names) or any(name not in self.names for name in domain["names"]):
+ change_required = True
+ elif self.role_mappings:
+ if len(self.body["roleMapCollection"]) != len(domain["roleMapCollection"]):
+ change_required = True
+ else:
+ for role_map in self.body["roleMapCollection"]:
+ for existing_role_map in domain["roleMapCollection"]:
+ if role_map["groupRegex"] == existing_role_map["groupRegex"] and role_map["name"] == existing_role_map["name"]:
+ break
+ else:
+ change_required = True
+
+ if not change_required and self.bind_user and self.bind_password:
+ if self.bind_user != domain["bindLookupUser"]["user"]:
+ change_required = True
+ elif self.bind_password:
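+ # Since the stored bind password is not returned by the API, a password change is detected by creating
+ # a temporary domain with the supplied credentials and testing both domains; if the temporary domain
+ # authenticates while the existing one does not, an update is required.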
+ temporary_domain = None
+ try:
+ # Check whether temporary domain exists
+ if any(domain["id"] == self.TEMPORARY_DOMAIN for domain in domains):
+ self.delete_domain(self.TEMPORARY_DOMAIN)
+
+ temporary_domain = self.add_domain(temporary=True, skip_test=True)
+ rc, tests = self.request(self.url_path_prefix + "ldap/test", method="POST")
+
+ temporary_domain_test = {}
+ domain_test = {}
+ for test in tests:
+ if test["id"] == temporary_domain["id"]:
+ temporary_domain_test = test["result"]
+ if self.id == test["id"]:
+ domain_test = test["result"]
+
+ if temporary_domain_test["authenticationTestResult"] == "ok" and domain_test["authenticationTestResult"] != "ok":
+ change_required = True
+ elif temporary_domain_test["authenticationTestResult"] != "ok":
+ self.module.fail_json(msg="Failed to authenticate bind credentials! Array Id [%s]." % self.ssid)
+
+ finally:
+ if temporary_domain:
+ self.delete_domain(self.TEMPORARY_DOMAIN)
+ break
+ else:
+ change_required = True
+ elif self.state == "absent":
+ for domain in domains:
+ if self.id == domain["id"]:
+ change_required = True
+
+ return change_required
+
+ def add_domain(self, temporary=False, skip_test=False):
+ """Add domain to storage system."""
+ domain = None
+ body = self.body.copy()
+ if temporary:
+ body.update({"id": self.TEMPORARY_DOMAIN, "names": [self.TEMPORARY_DOMAIN]})
+
+ try:
+ rc, response = self.request(self.url_path_prefix + "ldap/addDomain?skipTest=%s" % ("true" if not skip_test else "false"),
+ method="POST", data=body)
+ domain = response["ldapDomains"][0]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return domain
+
+ def update_domain(self):
+ """Update existing domain on storage system."""
+ try:
+ rc, response = self.request(self.url_path_prefix + "ldap/%s" % self.domain["id"], method="POST", data=self.body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def delete_domain(self, domain_id):
+ """Delete specific domain on the storage system."""
+ try:
+ url = self.url_path_prefix + "ldap/%s" % domain_id
+ rc, response = self.request(url, method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def disable_domains(self):
+ """Delete all existing domains on storage system."""
+ for domain_id in self.existing_domain_ids:
+ self.delete_domain(domain_id)
+
+ def apply(self):
+ """Apply any necessary changes to the LDAP configuration."""
+ self.build_request_body()
+ change_required = self.are_changes_required()
+
+ if change_required and not self.module.check_mode:
+ if self.state == "present":
+ if self.domain:
+ self.update_domain()
+ self.module.exit_json(msg="LDAP domain has been updated. Array Id: [%s]" % self.ssid, changed=change_required)
+ else:
+ self.add_domain()
+ self.module.exit_json(msg="LDAP domain has been added. Array Id: [%s]" % self.ssid, changed=change_required)
+ elif self.state == "absent":
+ if self.domain:
+ self.delete_domain(self.domain["id"])
+ self.module.exit_json(msg="LDAP domain has been removed. Array Id: [%s]" % self.ssid, changed=change_required)
+ else:
+ self.disable_domains()
+ self.module.exit_json(msg="All LDAP domains have been removed. Array Id: [%s]" % self.ssid, changed=change_required)
+
+ self.module.exit_json(msg="No changes have been made to the LDAP configuration. Array Id: [%s]" % self.ssid, changed=change_required)
+
+
+def main():
+ ldap = NetAppESeriesLdap()
+ ldap.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py
new file mode 100644
index 000000000..d3d70fb5d
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: na_santricity_lun_mapping
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+short_description: NetApp E-Series manage lun mappings
+description:
+ - Create, delete, or modify mappings between a volume and a targeted host/host+ group.
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Present will ensure the mapping exists, absent will remove the mapping.
+ type: str
+ required: False
+ choices: ["present", "absent"]
+ default: "present"
+ target:
+ description:
+ - The name of the host or hostgroup you wish to assign to the mapping.
+ - If omitted, the default hostgroup is used.
+ - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+ type: str
+ required: False
+ volume_name:
+ description:
+ - The name of the volume you wish to include in the mapping.
+ - Use ACCESS_VOLUME to reference the in-band access management volume.
+ type: str
+ required: True
+ aliases:
+ - volume
+ lun:
+ description:
+ - The LUN value you wish to give the mapping.
+ - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+ - LUN value will be determined by the storage-system when not specified.
+ type: int
+ required: false
+'''
+
+EXAMPLES = '''
+---
+ - name: Map volume1 to the host target host1
+ na_santricity_lun_mapping:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ target: host1
+ volume: volume1
+ - name: Delete the lun mapping between volume1 and host1
+ na_santricity_lun_mapping:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+ target: host1
+ volume: volume1
+'''
+RETURN = '''
+msg:
+ description: success of the module
+ returned: always
+ type: str
+ sample: Lun mapping is complete
+'''
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesLunMapping(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(state=dict(required=False, choices=["present", "absent"], default="present"),
+ target=dict(required=False, default=None),
+ volume_name=dict(required=True, aliases=["volume"]),
+ lun=dict(type="int", required=False))
+
+ super(NetAppESeriesLunMapping, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.state = args["state"] == "present"
+ self.target = args["target"] if args["target"] else "DEFAULT_HOSTGROUP"
+ self.volume = args["volume_name"] if args["volume_name"] != "ACCESS_VOLUME" else "Access"
+ self.lun = args["lun"]
+ self.check_mode = self.module.check_mode
+ self.mapping_info = None
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ def update_mapping_info(self):
+ """Collect the current state of the storage array."""
+ response = None
+ try:
+ rc, response = self.request("storage-systems/%s/graph" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ # Create dictionary containing host/cluster references mapped to their names
+ target_reference = {}
+ target_name = {}
+ target_type = {}
+
+ for host in response["storagePoolBundle"]["host"]:
+ target_reference.update({host["hostRef"]: host["name"]})
+ target_name.update({host["name"]: host["hostRef"]})
+ target_type.update({host["name"]: "host"})
+
+ for cluster in response["storagePoolBundle"]["cluster"]:
+
+ # Verify there is no ambiguity in the target's type (i.e. a host and a group sharing the same name)
+ if cluster["name"] == self.target and self.target in target_name.keys():
+ self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group targets! Id [%s]" % self.ssid)
+
+ target_reference.update({cluster["clusterRef"]: cluster["name"]})
+ target_name.update({cluster["name"]: cluster["clusterRef"]})
+ target_type.update({cluster["name"]: "group"})
+
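+ # The all-zero reference represents the system default host group, which is used when no explicit target is given.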
+ target_reference.update({"0000000000000000000000000000000000000000": "DEFAULT_HOSTGROUP"})
+ target_name.update({"DEFAULT_HOSTGROUP": "0000000000000000000000000000000000000000"})
+ target_type.update({"DEFAULT_HOSTGROUP": "group"})
+
+ volume_reference = {}
+ volume_name = {}
+ lun_name = {}
+ for volume in response["volume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+ for volume in response["highLevelVolBundle"]["thinVolume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+
+ volume_name.update({response["sa"]["accessVolume"]["name"]: response["sa"]["accessVolume"]["accessVolumeRef"]})
+ volume_reference.update({response["sa"]["accessVolume"]["accessVolumeRef"]: response["sa"]["accessVolume"]["name"]})
+
+ # Build current mapping object
+ self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"],
+ map_reference=mapping["mapRef"],
+ lun_mapping_reference=mapping["lunMappingRef"],
+ lun=mapping["lun"]
+ ) for mapping in response["storagePoolBundle"]["lunMapping"]],
+ volume_by_reference=volume_reference,
+ volume_by_name=volume_name,
+ lun_by_name=lun_name,
+ target_by_reference=target_reference,
+ target_by_name=target_name,
+ target_type_by_name=target_type)
+
+ def get_lun_mapping(self):
+ """Find the matching lun mapping reference.
+
+ Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun
+ """
+ target_match = False
+ reference = None
+ lun = None
+
+ self.update_mapping_info()
+
+ # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with
+ # the specified volume (ie for an update)
+ if self.lun and any((self.lun == lun_mapping["lun"] and
+ self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and
+ self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]]
+ ) for lun_mapping in self.mapping_info["lun_mapping"]):
+ self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid)
+
+ # Verify volume and target exist if needed for expected state.
+ if self.state:
+ if self.volume not in self.mapping_info["volume_by_name"].keys():
+ self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
+ if self.target and self.target not in self.mapping_info["target_by_name"].keys():
+ self.module.fail_json(msg="Target does not exist. Id [%s'." % self.ssid)
+
+ for lun_mapping in self.mapping_info["lun_mapping"]:
+
+ # Find matching volume reference
+ if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
+ reference = lun_mapping["lun_mapping_reference"]
+ lun = lun_mapping["lun"]
+
+ # Determine whether the lun mapping is attached to the specified target (and to the requested lun, when provided)
+ if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
+ self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
+ (self.lun is None or lun == self.lun)):
+ target_match = True
+
+ return target_match, reference, lun
+
+ def update(self):
+ """Execute the changes the require changes on the storage array."""
+ target_match, lun_reference, lun = self.get_lun_mapping()
+ update = (self.state and not target_match) or (not self.state and lun_reference)
+
+ if update and not self.check_mode:
+ try:
+ if self.state:
+ body = dict()
+ target = None if not self.target else self.mapping_info["target_by_name"][self.target]
+ if target:
+ body.update(dict(targetId=target))
+ if self.lun is not None:
+ body.update(dict(lun=self.lun))
+
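+ # An existing mapping for the volume is moved to the requested target/LUN; otherwise a new mapping is created.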
+ if lun_reference:
+ rc, response = self.request("storage-systems/%s/volume-mappings/%s/move" % (self.ssid, lun_reference), method="POST", data=body)
+ else:
+ body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
+ rc, response = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=body)
+
+ else: # Remove existing lun mapping for volume and target
+ rc, response = self.request("storage-systems/%s/volume-mappings/%s" % (self.ssid, lun_reference), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ self.module.exit_json(msg="Lun mapping is complete.", changed=update)
+
+
+def main():
+ mapping = NetAppESeriesLunMapping()
+ mapping.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py
new file mode 100644
index 000000000..f4bef849c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py
@@ -0,0 +1,656 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: na_santricity_mgmt_interface
+short_description: NetApp E-Series manage management interface configuration
+description:
+ - Configure the E-Series management interfaces
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Enable or disable IPv4 network interface configuration.
+ - Either IPv4 or IPv6 must be enabled, otherwise an error will occur.
+ - Assumed to be I(state=enabled) when I(config_method) is specified, unless I(state) is explicitly defined.
+ choices:
+ - enabled
+ - disabled
+ type: str
+ required: false
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are represented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ choices:
+ - A
+ - B
+ type: str
+ required: true
+ port:
+ description:
+ - The ethernet port configuration to modify.
+ - The channel represents the port number left to right on the controller, beginning with 1.
+ - Required when I(config_method) is specified.
+ type: int
+ required: false
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ config_method:
+ description:
+ - The configuration method type to use for network interface ports.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: false
+ dns_config_method:
+ description:
+ - The configuration method type to use for DNS services.
+ - dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: false
+ dns_address:
+ description:
+ - Primary IPv4 or IPv6 DNS server address
+ type: str
+ required: false
+ dns_address_backup:
+ description:
+ - Secondary IPv4 or IPv6 DNS server address
+ type: str
+ required: false
+ ntp_config_method:
+ description:
+ - The configuration method type to use for NTP services.
+ - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ choices:
+ - disabled
+ - dhcp
+ - static
+ type: str
+ required: false
+ ntp_address:
+ description:
+ - Primary IPv4, IPv6, or FQDN NTP server address
+ type: str
+ required: false
+ ntp_address_backup:
+ description:
+ - Secondary IPv4, IPv6, or FQDN NTP server address
+ type: str
+ required: false
+ ssh:
+ description:
+ - Enable ssh access to the controller for debug purposes.
+ - This is a controller-level setting.
+ - rlogin/telnet will be enabled on older equipment where ssh is not available.
+ type: bool
+ required: false
+notes:
+ - Check mode is supported.
+ - It is highly recommended to have a minimum of one up management port on each controller.
+ - When using SANtricity Web Services Proxy, use M(na_santricity_storage_system) to update management paths. This is required because of a known issue
+ that will be addressed in proxy version 4.1. Once resolved, the management ports should be updated automatically.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc), can take seconds or minutes longer to take effect.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "2"
+ controller: "B"
+ state: disabled
+
+ - name: Enable ssh access for controller A
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ ssh: yes
+
+ - name: Configure static DNS settings for the first port on controller A
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ dns_config_method: static
+ dns_address: "192.168.1.100"
+ dns_address_backup: "192.168.1.1"
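+
+ # Illustrative example added for completeness; the NTP server addresses below are assumptions, not values from the original documentation.
+ - name: Configure static NTP settings for controller A
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ controller: "A"
+ ntp_config_method: static
+ ntp_address: "192.168.1.200"
+ ntp_address_backup: "0.pool.ntp.org"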
+
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+available_embedded_api_urls:
+ description: List containing available web services embedded REST API urls
+ returned: on success
+ type: list
+ sample:
+"""
+from time import sleep
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils import six
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+try:
+ import ipaddress
+except ImportError:
+ HAS_IPADDRESS = False
+else:
+ HAS_IPADDRESS = True
+
+
+def is_ipv4(address):
+ """Determine whether address is IPv4."""
+ try:
+ if six.PY2:
+ address = six.u(address)
+ ipaddress.IPv4Address(address)
+ return True
+ except Exception as error:
+ return False
+
+
+def is_ipv6(address):
+ """Determine whether address is IPv6."""
+ try:
+ if six.PY2:
+ address = six.u(address)
+ ipaddress.IPv6Address(address)
+ return True
+ except Exception as error:
+ return False
+
+
+class NetAppESeriesMgmtInterface(NetAppESeriesModule):
+ MAXIMUM_VERIFICATION_TIMEOUT = 120
+
+ def __init__(self):
+ ansible_options = dict(state=dict(type="str", choices=["enabled", "disabled"], required=False),
+ controller=dict(type="str", required=True, choices=["A", "B"]),
+ port=dict(type="int"),
+ address=dict(type="str", required=False),
+ subnet_mask=dict(type="str", required=False),
+ gateway=dict(type="str", required=False),
+ config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+ dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+ dns_address=dict(type="str", required=False),
+ dns_address_backup=dict(type="str", required=False),
+ ntp_config_method=dict(type="str", required=False, choices=["disabled", "dhcp", "static"]),
+ ntp_address=dict(type="str", required=False),
+ ntp_address_backup=dict(type="str", required=False),
+ ssh=dict(type="bool", required=False))
+
+ required_if = [["config_method", "static", ["port", "address", "subnet_mask"]],
+ ["dns_config_method", "static", ["dns_address"]],
+ ["ntp_config_method", "static", ["ntp_address"]]]
+
+ super(NetAppESeriesMgmtInterface, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ required_if=required_if,
+ supports_check_mode=True)
+
+ args = self.module.params
+ if args["state"] is None:
+ if args["config_method"] is not None:
+ self.enable_interface = True
+ else:
+ self.enable_interface = None
+ else:
+ self.enable_interface = args["state"] == "enabled"
+
+ self.controller = args["controller"]
+ self.channel = args["port"]
+
+ self.config_method = args["config_method"]
+ self.address = args["address"]
+ self.subnet_mask = args["subnet_mask"]
+ self.gateway = args["gateway"]
+
+ self.dns_config_method = args["dns_config_method"]
+ self.dns_address = args["dns_address"]
+ self.dns_address_backup = args["dns_address_backup"]
+
+ self.ntp_config_method = args["ntp_config_method"]
+ self.ntp_address = args["ntp_address"]
+ self.ntp_address_backup = args["ntp_address_backup"]
+
+ self.ssh = args["ssh"]
+
+ self.body = {}
+ self.interface_info = {}
+ self.alt_interface_addresses = []
+ self.all_interface_addresses = []
+ self.use_alternate_address = False
+ self.alt_url_path = None
+
+ self.available_embedded_api_urls = []
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their references
+ :return: controllers defined on the system. Example: {'A': '070000000000000000000001', 'B': '070000000000000000000002'}
+ """
+ try:
+ rc, controllers = self.request("storage-systems/%s/controllers" % self.ssid)
+ except Exception as err:
+ controllers = list()
+ self.module.fail_json(msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ controllers.sort(key=lambda c: c['physicalLocation']['slot'])
+ controllers_dict = dict()
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ settings = dict(controllerSlot=controller['physicalLocation']['slot'],
+ controllerRef=controller['controllerRef'],
+ ssh=controller['networkSettings']['remoteAccessEnabled'])
+ controllers_dict[label] = settings
+ i += 1
+ return controllers_dict
+
+ def update_target_interface_info(self, retries=60):
+ """Discover and update cached interface info."""
+ net_interfaces = list()
+ try:
+ rc, net_interfaces = self.request("storage-systems/%s/configuration/ethernet-interfaces" % self.ssid)
+ except Exception as error:
+ if retries > 0:
+ self.update_target_interface_info(retries=retries - 1)
+ return
+ else:
+ self.module.fail_json(msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ iface = None
+ channels = {}
+ controller_info = self.get_controllers()[self.controller]
+ controller_ref = controller_info["controllerRef"]
+ controller_ssh = controller_info["ssh"]
+ controller_dns = None
+ controller_ntp = None
+ dummy_interface_id = None # Needed when a specific interface is not required (i.e. DNS/NTP/SSH-only changes)
+ for net in net_interfaces:
+ if net["controllerRef"] == controller_ref:
+ channels.update({net["channel"]: net["linkStatus"]})
+ if dummy_interface_id is None:
+ dummy_interface_id = net["interfaceRef"]
+ if controller_dns is None:
+ controller_dns = net["dnsProperties"]
+ if controller_ntp is None:
+ controller_ntp = net["ntpProperties"]
+
+ if net["ipv4Enabled"] and net["linkStatus"] == "up":
+ self.all_interface_addresses.append(net["ipv4Address"])
+ if net["controllerRef"] == controller_ref and net["channel"] == self.channel:
+ iface = net
+ elif net["ipv4Enabled"] and net["linkStatus"] == "up":
+ self.alt_interface_addresses.append(net["ipv4Address"])
+
+ # Add controller specific information (ssh, dns and ntp)
+ self.interface_info.update({
+ "id": dummy_interface_id,
+ "controllerRef": controller_ref,
+ "ssh": controller_ssh,
+ "dns_config_method": controller_dns["acquisitionProperties"]["dnsAcquisitionType"],
+ "dns_servers": controller_dns["acquisitionProperties"]["dnsServers"],
+ "ntp_config_method": controller_ntp["acquisitionProperties"]["ntpAcquisitionType"],
+ "ntp_servers": controller_ntp["acquisitionProperties"]["ntpServers"],})
+
+ # Add interface specific information when configuring IP address.
+ if self.config_method is not None:
+ if iface is None:
+ available_controllers = ["%s (%s)" % (channel, status) for channel, status in channels.items()]
+ self.module.fail_json(msg="Invalid port number! Controller %s ports: [%s]. Array [%s]"
+ % (self.controller, ",".join(available_controllers), self.ssid))
+ else:
+ self.interface_info.update({
+ "id": iface["interfaceRef"],
+ "controllerSlot": iface["controllerSlot"],
+ "channel": iface["channel"],
+ "link_status": iface["linkStatus"],
+ "enabled": iface["ipv4Enabled"],
+ "config_method": iface["ipv4AddressConfigMethod"],
+ "address": iface["ipv4Address"],
+ "subnet_mask": iface["ipv4SubnetMask"],
+ "gateway": iface["ipv4GatewayAddress"],
+ "ipv6_enabled": iface["ipv6Enabled"],})
+
+ def update_body_enable_interface_setting(self):
+ """Enable or disable the IPv4 network interface."""
+ change_required = False
+ if not self.enable_interface and not self.interface_info["ipv6_enabled"]:
+ self.module.fail_json(msg="Either IPv4 or IPv6 must be enabled. Array [%s]." % self.ssid)
+
+ if self.enable_interface != self.interface_info["enabled"]:
+ change_required = True
+ self.body.update({"ipv4Enabled": self.enable_interface})
+ return change_required
+
+ def update_body_interface_settings(self):
+ """Update network interface settings."""
+ change_required = False
+ if self.config_method == "dhcp":
+ if self.interface_info["config_method"] != "configDhcp":
+ if self.interface_info["address"] in self.url:
+ self.use_alternate_address = True
+ change_required = True
+ self.body.update({"ipv4AddressConfigMethod": "configDhcp"})
+ else:
+ self.body.update({"ipv4AddressConfigMethod": "configStatic", "ipv4Address": self.address, "ipv4SubnetMask": self.subnet_mask})
+ if self.interface_info["config_method"] != "configStatic":
+ change_required = True
+ if self.address and self.interface_info["address"] != self.address:
+ if self.interface_info["address"] in self.url:
+ self.use_alternate_address = True
+ change_required = True
+ if self.subnet_mask and self.interface_info["subnet_mask"] != self.subnet_mask:
+ change_required = True
+ if self.gateway and self.interface_info["gateway"] != self.gateway:
+ self.body.update({"ipv4GatewayAddress": self.gateway})
+ change_required = True
+
+ return change_required
+
+ def update_body_dns_server_settings(self):
+ """Add DNS server information to the request body."""
+ change_required = False
+ if self.dns_config_method == "dhcp":
+ if self.interface_info["dns_config_method"] != "dhcp":
+ change_required = True
+ self.body.update({"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "dhcp"}})
+
+ elif self.dns_config_method == "static":
+ dns_servers = []
+ if ((self.dns_address and self.dns_address_backup and (not self.interface_info["dns_servers"] or
+ len(self.interface_info["dns_servers"]) != 2)) or
+ (self.dns_address and not self.dns_address_backup and (not self.interface_info["dns_servers"] or
+ len(self.interface_info["dns_servers"]) != 1))):
+ change_required = True
+
+ # Check primary DNS address
+ if self.dns_address:
+ if is_ipv4(self.dns_address):
+ dns_servers.append({"addressType": "ipv4", "ipv4Address": self.dns_address})
+ if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 1 or
+ self.interface_info["dns_servers"][0]["addressType"] != "ipv4" or
+ self.interface_info["dns_servers"][0]["ipv4Address"] != self.dns_address):
+ change_required = True
+ elif is_ipv6(self.dns_address):
+ dns_servers.append({"addressType": "ipv6", "ipv6Address": self.dns_address})
+ if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 1 or
+ self.interface_info["dns_servers"][0]["addressType"] != "ipv6" or
+ self.interface_info["dns_servers"][0]["ipv6Address"].replace(":", "").lower() != self.dns_address.replace(":", "").lower()):
+ change_required = True
+ else:
+ self.module.fail_json(msg="Invalid IP address! DNS address must be either IPv4 or IPv6. Address [%s]."
+ " Array [%s]." % (self.dns_address, self.ssid))
+
+ # Check secondary DNS address
+ if self.dns_address_backup:
+ if is_ipv4(self.dns_address_backup):
+ dns_servers.append({"addressType": "ipv4", "ipv4Address": self.dns_address_backup})
+ if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 2 or
+ self.interface_info["dns_servers"][1]["addressType"] != "ipv4" or
+ self.interface_info["dns_servers"][1]["ipv4Address"] != self.dns_address_backup):
+ change_required = True
+ elif is_ipv6(self.dns_address_backup):
+ dns_servers.append({"addressType": "ipv6", "ipv6Address": self.dns_address_backup})
+ if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 2 or
+ self.interface_info["dns_servers"][1]["addressType"] != "ipv6" or
+ self.interface_info["dns_servers"][1]["ipv6Address"].replace(":", "").lower() != self.dns_address_backup.replace(":", "").lower()):
+ change_required = True
+ else:
+ self.module.fail_json(msg="Invalid IP address! DNS address must be either IPv4 or IPv6. Address [%s]."
+ " Array [%s]." % (self.dns_address, self.ssid))
+
+ self.body.update({"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "stat", "dnsServers": dns_servers}})
+
+ return change_required
+
+ def update_body_ntp_server_settings(self):
+ """Add NTP server information to the request body."""
+ change_required = False
+ if self.ntp_config_method == "disabled":
+ if self.interface_info["ntp_config_method"] != "disabled":
+ change_required = True
+ self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "disabled"}})
+
+ elif self.ntp_config_method == "dhcp":
+ if self.interface_info["ntp_config_method"] != "dhcp":
+ change_required = True
+ self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "dhcp"}})
+
+ elif self.ntp_config_method == "static":
+ ntp_servers = []
+ if ((self.ntp_address and self.ntp_address_backup and (not self.interface_info["ntp_servers"] or
+ len(self.interface_info["ntp_servers"]) != 2)) or
+ (self.ntp_address and not self.ntp_address_backup and (not self.interface_info["ntp_servers"] or
+ len(self.interface_info["ntp_servers"]) != 1))):
+ change_required = True
+
+ # Check primary NTP address
+ if self.ntp_address:
+ if is_ipv4(self.ntp_address):
+ ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": self.ntp_address}})
+ if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 1 or
+ self.interface_info["ntp_servers"][0]["addrType"] != "ipvx" or
+ self.interface_info["ntp_servers"][0]["ipvxAddress"]["addressType"] != "ipv4" or
+ self.interface_info["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address):
+ change_required = True
+ elif is_ipv6(self.ntp_address):
+ ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv6", "ipv6Address": self.ntp_address}})
+ if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 1 or
+ self.interface_info["ntp_servers"][0]["addrType"] != "ipvx" or
+ self.interface_info["ntp_servers"][0]["ipvxAddress"]["addressType"] != "ipv6" or
+ self.interface_info["ntp_servers"][0]["ipvxAddress"][
+ "ipv6Address"].replace(":", "").lower() != self.ntp_address.replace(":", "").lower()):
+ change_required = True
+ else:
+ ntp_servers.append({"addrType": "domainName", "domainName": self.ntp_address})
+ if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 1 or
+ self.interface_info["ntp_servers"][0]["addrType"] != "domainName" or
+ self.interface_info["ntp_servers"][0]["domainName"] != self.ntp_address):
+ change_required = True
+
+ # Check secondary NTP address
+ if self.ntp_address_backup:
+ if is_ipv4(self.ntp_address_backup):
+ ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": self.ntp_address_backup}})
+ if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 2 or
+ self.interface_info["ntp_servers"][1]["addrType"] != "ipvx" or
+ self.interface_info["ntp_servers"][1]["ipvxAddress"]["addressType"] != "ipv4" or
+ self.interface_info["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup):
+ change_required = True
+ elif is_ipv6(self.ntp_address_backup):
+ ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv6", "ipv6Address": self.ntp_address_backup}})
+ if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 2 or
+ self.interface_info["ntp_servers"][1]["addrType"] != "ipvx" or
+ self.interface_info["ntp_servers"][1]["ipvxAddress"]["addressType"] != "ipv6" or
+ self.interface_info["ntp_servers"][1]["ipvxAddress"][
+ "ipv6Address"].replace(":", "").lower() != self.ntp_address_backup.replace(":", "").lower()):
+ change_required = True
+ else:
+ ntp_servers.append({"addrType": "domainName", "domainName": self.ntp_address_backup})
+ if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 2 or
+ self.interface_info["ntp_servers"][1]["addrType"] != "domainName" or
+ self.interface_info["ntp_servers"][1]["domainName"].lower() != self.ntp_address_backup.lower()):
+ change_required = True
+
+ self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "stat", "ntpServers": ntp_servers}})
+
+ return change_required
+
+ def update_body_ssh_setting(self):
+ """Configure network interface ports for remote ssh access."""
+ change_required = False
+ if self.interface_info["ssh"] != self.ssh:
+ change_required = True
+ self.body.update({"enableRemoteAccess": self.ssh})
+ return change_required
+
+ def update_request_body(self):
+ """Verify all required changes have been made."""
+ self.update_target_interface_info()
+ self.body = {"controllerRef": self.get_controllers()[self.controller]["controllerRef"], "interfaceRef": self.interface_info["id"]}
+
+ change_required = False
+ if self.enable_interface is not None:
+ change_required = self.update_body_enable_interface_setting()
+ if self.config_method is not None:
+ change_required = self.update_body_interface_settings() or change_required
+ if self.dns_config_method is not None:
+ change_required = self.update_body_dns_server_settings() or change_required
+ if self.ntp_config_method is not None:
+ change_required = self.update_body_ntp_server_settings() or change_required
+ if self.ssh is not None:
+ change_required = self.update_body_ssh_setting() or change_required
+
+ self.module.log("update_request_body change_required: %s" % change_required)
+ return change_required
+
+ def update_url(self, retries=60):
+ """Update eseries base class url if on is available."""
+ for address in self.alt_interface_addresses:
+ if address not in self.url and address != "0.0.0.0":
+ parsed_url = urlparse.urlparse(self.url)
+ location = parsed_url.netloc.split(":")
+ location[0] = address
+ self.url = "%s://%s/" % (parsed_url.scheme, ":".join(location))
+ self.available_embedded_api_urls = ["%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)]
+ self.module.warn("Using alternate address [%s]" % self.available_embedded_api_urls[0])
+ break
+ else:
+ if retries > 0:
+ sleep(1)
+ self.update_target_interface_info()
+ self.update_url(retries=retries - 1)
+ else:
+ self.module.warn("Unable to obtain an alternate url!")
+
+ def update(self):
+ """Update controller with new interface, dns service, ntp service and/or remote ssh access information."""
+ change_required = self.update_request_body()
+
+ # Build list of available web services rest api urls
+ self.available_embedded_api_urls = []
+ parsed_url = urlparse.urlparse(self.url)
+ location = parsed_url.netloc.split(":")
+ for address in self.all_interface_addresses:
+ location[0] = address
+ self.available_embedded_api_urls = ["%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)]
+
+ if change_required and not self.module.check_mode:
+
+ # Update url if currently used interface will be modified
+ if self.is_embedded():
+ if self.use_alternate_address:
+ self.update_url()
+ if self.address:
+ parsed_url = urlparse.urlparse(self.url)
+ location = parsed_url.netloc.split(":")
+ location[0] = self.address
+ self.available_embedded_api_urls.append("%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH))
+ else:
+ self.available_embedded_api_urls = ["%s/%s" % (self.url, self.DEFAULT_REST_API_PATH)]
+
+ # Update management interface
+ try:
+ rc, response = self.request("storage-systems/%s/configuration/ethernet-interfaces" % self.ssid, method="POST", data=self.body)
+ except Exception as error:
+ # Ignore request failures here; the connection may drop when the interface servicing this request is reconfigured.
+ pass
+
+ # Validate all changes have been made
+ for retries in range(self.MAXIMUM_VERIFICATION_TIMEOUT):
+ if not self.update_request_body():
+ break
+ sleep(1)
+ else:
+ self.module.warn("Changes failed to complete! Timeout waiting for management interface to update. Array [%s]." % self.ssid)
+ self.module.exit_json(msg="The interface settings have been updated.", changed=change_required,
+ available_embedded_api_urls=self.available_embedded_api_urls)
+ self.module.exit_json(msg="No changes are required.", changed=change_required,
+ available_embedded_api_urls=self.available_embedded_api_urls if self.is_embedded() else [])
+
+
+def main():
+ interface = NetAppESeriesMgmtInterface()
+ interface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py
new file mode 100644
index 000000000..d4d042d01
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_nvme_interface
+short_description: NetApp E-Series manage NVMe interface configuration
+description: Configure settings of an E-Series NVMe interface
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ address:
+ description:
+ - The IPv4 address to assign to the NVMe interface
+ type: str
+ required: false
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Only applicable when configuring RoCE
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Only applicable when configuring RoCE
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ config_method:
+ description:
+ - The configuration method type to use for this interface.
+ - Only applicable when configuring RoCE
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ type: str
+ choices:
+ - dhcp
+ - static
+ required: false
+ default: dhcp
+ mtu:
+ description:
+ - The maximum transmission units (MTU), in bytes.
+ - Only applicable when configuring RoCE
+ - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+ (any value > 1500).
+ - Generally, it is necessary to have your host, switches, and other components not only support jumbo
+ frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to
+ leave this at the default.
+ type: int
+ default: 1500
+ required: false
+ aliases:
+ - max_frame_size
+ speed:
+ description:
+ - This is the ethernet port speed measured in Gb/s.
+ - Value must be a supported speed or auto for automatically negotiating the speed with the port.
+ - Only applicable when configuring RoCE
+ - The configured ethernet port speed should match the speed capability of the SFP on the selected port.
+ type: str
+ required: false
+ default: auto
+ state:
+ description:
+ - Whether or not the specified RoCE interface should be enabled.
+ - Only applicable when configuring RoCE
+ choices:
+ - enabled
+ - disabled
+ type: str
+ required: false
+ default: enabled
+ channel:
+ description:
+ - This option specifies which NVMe controller channel to configure.
+ - The list of choices is not necessarily comprehensive. It depends on the number of ports
+ that are available in the system.
+ - The numerical value represents the number of the channel (typically from left to right on the HIC),
+ beginning with a value of 1.
+ type: int
+ required: true
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A and the second as B.
+ type: str
+ required: true
+ choices: [A, B]
+"""
+EXAMPLES = """
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesNvmeInterface(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(address=dict(type="str", required=False),
+ subnet_mask=dict(type="str", required=False),
+ gateway=dict(type="str", required=False),
+ config_method=dict(type="str", required=False, default="dhcp", choices=["dhcp", "static"]),
+ mtu=dict(type="int", default=1500, required=False, aliases=["max_frame_size"]),
+ speed=dict(type="str", default="auto", required=False),
+ state=dict(type="str", default="enabled", required=False, choices=["enabled", "disabled"]),
+ channel=dict(type="int", required=True),
+ controller=dict(type="str", required=True, choices=["A", "B"]))
+
+ required_if = [["config_method", "static", ["address", "subnet_mask"]]]
+ super(NetAppESeriesNvmeInterface, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ required_if=required_if,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.address = args["address"]
+ self.subnet_mask = args["subnet_mask"]
+ self.gateway = args["gateway"]
+ self.config_method = "configDhcp" if args["config_method"] == "dhcp" else "configStatic"
+ self.mtu = args["mtu"]
+ self.speed = args["speed"]
+ self.enabled = args["state"] == "enabled"
+ self.channel = args["channel"]
+ self.controller = args["controller"]
+
+ address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+ if self.address and not address_regex.match(self.address):
+ self.module.fail_json(msg="An invalid ip address was provided for address. Address [%s]." % self.address)
+ if self.subnet_mask and not address_regex.match(self.subnet_mask):
+ self.module.fail_json(msg="An invalid ip address was provided for subnet_mask. Subnet mask [%s]." % self.subnet_mask)
+ if self.gateway and not address_regex.match(self.gateway):
+ self.module.fail_json(msg="An invalid ip address was provided for gateway. Gateway [%s]." % self.gateway)
+
+ self.get_target_interface_cache = None
+
+ def get_nvmeof_interfaces(self):
+ """Retrieve all interfaces that are using nvmeof"""
+ ifaces = list()
+ try:
+ rc, ifaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ # Filter out all not nvme-nvmeof hostside interfaces.
+ nvmeof_ifaces = []
+ for iface in ifaces:
+ interface_type = iface["ioInterfaceTypeData"]["interfaceType"]
+ properties = iface["commandProtocolPropertiesList"]["commandProtocolProperties"]
+
+ try:
+ link_status = iface["ioInterfaceTypeData"]["ib"]["linkState"]
+ except Exception as error:
+ link_status = iface["ioInterfaceTypeData"]["ethernet"]["interfaceData"]["ethernetData"]["linkStatus"]
+
+ if (properties and properties[0]["commandProtocol"] == "nvme" and
+ properties[0]["nvmeProperties"]["commandSet"] == "nvmeof"):
+ nvmeof_ifaces.append({"properties": properties[0]["nvmeProperties"]["nvmeofProperties"],
+ "reference": iface["interfaceRef"],
+ "channel": iface["ioInterfaceTypeData"][iface["ioInterfaceTypeData"]["interfaceType"]]["channel"],
+ "interface_type": interface_type,
+ "interface": iface["ioInterfaceTypeData"][interface_type],
+ "controller_id": iface["controllerRef"],
+ "link_status": link_status})
+ return nvmeof_ifaces
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their references"""
+ controllers = list()
+ try:
+ rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ controllers.sort()
+ controllers_dict = {}
+ i = ord("A")
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[label] = controller
+ i += 1
+
+ return controllers_dict
+
+ def get_target_interface(self):
+ """Retrieve the targeted controller interface"""
+ if self.get_target_interface_cache is None:
+ ifaces = self.get_nvmeof_interfaces()
+ controller_id = self.get_controllers()[self.controller]
+
+ controller_ifaces = []
+ for iface in ifaces:
+ if iface["controller_id"] == controller_id:
+ controller_ifaces.append(iface)
+
+ sorted_controller_ifaces = sorted(controller_ifaces, key=lambda x: x["channel"])
+ if self.channel < 1 or self.channel > len(controller_ifaces):
+ status_msg = ", ".join(["%s (link %s)" % (index + 1, iface["link_status"])
+ for index, iface in enumerate(sorted_controller_ifaces)])
+ self.module.fail_json(msg="Invalid controller %s NVMe channel. Available channels: %s, Array Id [%s]."
+ % (self.controller, status_msg, self.ssid))
+
+ self.get_target_interface_cache = sorted_controller_ifaces[self.channel - 1]
+
+ return self.get_target_interface_cache
+
+ def update(self):
+ """Update the storage system's controller nvme interface if needed."""
+ update_required = False
+ body = {}
+
+ iface = self.get_target_interface()
+ if iface["properties"]["provider"] == "providerInfiniband":
+ if (iface["properties"]["ibProperties"]["ipAddressData"]["addressType"] != "ipv4" or
+ iface["properties"]["ibProperties"]["ipAddressData"]["ipv4Data"]["ipv4Address"] != self.address):
+ update_required = True
+ body = {"settings": {"ibSettings": {"networkSettings": {"ipv4Address": self.address}}}}
+
+ elif iface["properties"]["provider"] == "providerRocev2":
+ interface_data = iface["interface"]["interfaceData"]["ethernetData"]
+ current_speed = interface_data["currentInterfaceSpeed"].lower().replace("speed", "").replace("gig", "")
+ interface_supported_speeds = [str(speed).lower().replace("speed", "").replace("gig", "")
+ for speed in interface_data["supportedInterfaceSpeeds"]]
+ if self.speed not in interface_supported_speeds:
+ self.module.fail_json(msg="Unsupported interface speed! Options %s. Array [%s]."
+ % (interface_supported_speeds, self.ssid))
+
+ roce_properties = iface["properties"]["roceV2Properties"]
+ if self.enabled != roce_properties["ipv4Enabled"]:
+ update_required = True
+ if self.address and roce_properties["ipv4Data"]["ipv4AddressConfigMethod"] != self.config_method:
+ update_required = True
+ if self.address and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address:
+ update_required = True
+ if self.subnet_mask and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"] != self.subnet_mask:
+ update_required = True
+ if self.gateway and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"] != self.gateway:
+ update_required = True
+ if self.speed and self.speed != current_speed:
+ update_required = True
+ if (self.mtu and iface["interface"]["interfaceData"]["ethernetData"][
+ "maximumFramePayloadSize"] != self.mtu):
+ update_required = True
+
+ if update_required:
+ body = {"id": iface["reference"], "settings": {"roceV2Settings": {
+ "networkSettings": {"ipv4Enabled": self.enabled,
+ "ipv4Settings": {"configurationMethod": self.config_method}}}}}
+
+ if self.config_method == "configStatic":
+ if self.address:
+ body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update(
+ {"address": self.address})
+ if self.subnet_mask:
+ body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update(
+ {"subnetMask": self.subnet_mask})
+ if self.gateway:
+ body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update(
+ {"gatewayAddress": self.gateway})
+ if self.speed:
+ if self.speed == "auto":
+ body["settings"]["roceV2Settings"]["networkSettings"].update({"interfaceSpeed": "speedAuto"})
+ else:
+ body["settings"]["roceV2Settings"]["networkSettings"].update(
+ {"interfaceSpeed": "speed%sgig" % self.speed})
+ if self.mtu:
+ body["settings"]["roceV2Settings"]["networkSettings"].update({"interfaceMtu": self.mtu})
+
+ if update_required and not self.module.check_mode:
+ try:
+ rc, iface = self.request("storage-systems/%s/nvmeof/interfaces/%s" % (self.ssid, iface["reference"]),
+ method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to configure interface. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ self.module.exit_json(msg="NVMeoF interface settings have been updated.", changed=update_required)
+ self.module.exit_json(msg="No changes have been made.", changed=update_required)
+
+
+def main():
+ nvmeof_interface = NetAppESeriesNvmeInterface()
+ nvmeof_interface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py
new file mode 100644
index 000000000..715467e18
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_drive_firmware_upload
+short_description: NetApp E-Series manage proxy drive firmware files
+description:
+ - Ensure drive firmware files are available on SANtricity Web Service Proxy.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+ firmware:
+ description:
+ - This option can be a list of file paths and/or directories containing drive firmware.
+ - Note that only files with the .dlp extension will be added to the proxy; all other files will be ignored.
+ - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+ type: list
+ required: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ na_santricity_proxy_drive_firmware_upload:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware:
+ - "path/to/drive_firmware_file1.dlp"
+ - "path/to/drive_firmware_file2.dlp"
+ - "path/to/drive_firmware_directory"
+"""
+RETURN = """
+msg:
+ description: Whether any changes have been made to the collection of drive firmware on SANtricity Web Services Proxy.
+ type: str
+ returned: always
+"""
+import os
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
+class NetAppESeriesProxyDriveFirmwareUpload(NetAppESeriesModule):
+ WAIT_TIMEOUT_SEC = 60 * 15
+
+ def __init__(self):
+ ansible_options = dict(firmware=dict(type="list", required=False))
+
+ super(NetAppESeriesProxyDriveFirmwareUpload, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True,
+ proxy_specific_task=True)
+ args = self.module.params
+ self.firmware = args["firmware"]
+
+ self.files = None
+ self.add_files = []
+ self.remove_files = []
+ self.upload_failures = []
+
+ def determine_file_paths(self):
+ """Determine all the drive firmware file paths."""
+ self.files = {}
+ if self.firmware:
+ for path in self.firmware:
+
+ if not os.path.exists(path):
+ self.module.fail_json(msg="Drive firmware file does not exist! File [%s]" % path)
+ elif os.path.isdir(path):
+ if not path.endswith("/"):
+ path = path + "/"
+ for dir_filename in os.listdir(path):
+ if ".dlp" in dir_filename:
+ self.files.update({dir_filename: path + dir_filename})
+ elif ".dlp" in path:
+ name = os.path.basename(path)
+ self.files.update({name: path})
+
+ self.module.warn("%s" % self.files)
+
+ def determine_changes(self):
+ """Determine whether drive firmware files should be uploaded to the proxy."""
+ try:
+ rc, results = self.request("files/drive")
+ current_files = [result["fileName"] for result in results]
+
+ for current_file in current_files:
+ if current_file not in self.files.keys():
+ self.remove_files.append(current_file)
+
+ for expected_file in self.files.keys():
+ if expected_file not in current_files:
+ self.add_files.append(expected_file)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve proxy drive firmware file list. Error [%s]" % error)
+
+ def upload_files(self):
+ """Add drive firmware file to the proxy."""
+ for filename in self.add_files:
+ firmware_name = os.path.basename(filename)
+ files = [("file", firmware_name, self.files[filename])]
+ headers, data = create_multipart_formdata(files)
+ try:
+ rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+ except Exception as error:
+ self.upload_failures.append(filename)
+ self.module.warn("Failed to upload drive firmware file. File [%s]." % firmware_name)
+
+ def delete_files(self):
+ """Remove drive firmware file to the proxy."""
+ for filename in self.remove_files:
+ try:
+ rc, response = self.request("files/drive/%s" % filename, method="DELETE")
+ except Exception as error:
+ self.upload_failures.append(filename)
+ self.module.warn("Failed to delete drive firmware file. File [%s]" % filename)
+
+ def apply(self):
+ """Apply state to the web services proxy."""
+ change_required = False
+ if not self.is_proxy():
+ self.module.fail_json(msg="Module can only be executed against SANtricity Web Services Proxy.")
+
+ self.determine_file_paths()
+ self.determine_changes()
+
+ if self.add_files or self.remove_files:
+ change_required = True
+
+ if change_required and not self.module.check_mode:
+ self.upload_files()
+ self.delete_files()
+
+ self.module.exit_json(changed=change_required, files_added=self.add_files, files_removed=self.remove_files)
+
+
+def main():
+ proxy_firmware_upload = NetAppESeriesProxyDriveFirmwareUpload()
+ proxy_firmware_upload.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py
new file mode 100644
index 000000000..100b1f051
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_firmware_upload
+short_description: NetApp E-Series manage proxy firmware uploads.
+description:
+ - Ensure specific firmware versions are available on SANtricity Web Services Proxy.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+ firmware:
+ description:
+ - List of paths and/or directories containing firmware/NVSRAM files.
+ - All firmware/NVSRAM files that are not specified will be removed from the proxy if they exist.
+ type: list
+ required: false
+"""
+EXAMPLES = """
+- name: Ensure proxy has the expected firmware versions.
+ na_santricity_proxy_firmware_upload:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware:
+ - "path/to/firmware/dlp_files"
+ - "path/to/nvsram.dlp"
+ - "path/to/firmware.dlp"
+"""
+RETURN = """
+msg:
+ description: Status and version of firmware and NVSRAM.
+ type: str
+ returned: always
+ sample:
+"""
+import os
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
+class NetAppESeriesProxyFirmwareUpload(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(firmware=dict(type="list", required=False))
+ super(NetAppESeriesProxyFirmwareUpload, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True,
+ proxy_specific_task=True)
+
+ args = self.module.params
+ self.firmware = args["firmware"]
+ self.files = None
+ self.add_files = []
+ self.remove_files = []
+ self.upload_failures = []
+
+ def determine_file_paths(self):
+ """Determine all the drive firmware file paths."""
+ self.files = {}
+ if self.firmware:
+ for firmware_path in self.firmware:
+
+ if not os.path.exists(firmware_path):
+ self.module.fail_json(msg="Drive firmware file does not exist! File [%s]" % firmware_path)
+ elif os.path.isdir(firmware_path):
+ if not firmware_path.endswith("/"):
+ firmware_path = firmware_path + "/"
+
+ for dir_filename in os.listdir(firmware_path):
+ if ".dlp" in dir_filename:
+ self.files.update({dir_filename: firmware_path + dir_filename})
+ elif ".dlp" in firmware_path:
+ name = os.path.basename(firmware_path)
+ self.files.update({name: firmware_path})
+
+ def determine_changes(self):
+ """Determine whether files need to be added or removed."""
+ try:
+ rc, results = self.request("firmware/cfw-files")
+ current_files = [result["filename"] for result in results]
+
+ for current_file in current_files:
+ if current_file not in self.files.keys():
+ self.remove_files.append(current_file)
+
+ for expected_file in self.files.keys():
+ if expected_file not in current_files:
+ self.add_files.append(expected_file)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve current firmware file listing.")
+
+ def upload_files(self):
+ """Upload firmware and nvsram file."""
+ for filename in self.add_files:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", filename, self.files[filename])]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload/", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.upload_failures.append(filename)
+ self.module.warn("Failed to upload firmware file. File [%s]" % filename)
+
+ def delete_files(self):
+ """Remove firmware and nvsram file."""
+ for filename in self.remove_files:
+ try:
+ rc, response = self.request("firmware/upload/%s" % filename, method="DELETE")
+ except Exception as error:
+ self.upload_failures.append(filename)
+ self.module.warn("Failed to delete firmware file. File [%s]" % filename)
+
+ def apply(self):
+ """Upgrade controller firmware."""
+ change_required = False
+ if not self.is_proxy():
+ self.module.fail_json(msg="Module can only be executed against SANtricity Web Services Proxy.")
+
+ self.determine_file_paths()
+ self.determine_changes()
+ if self.add_files or self.remove_files:
+ change_required = True
+
+ if change_required and not self.module.check_mode:
+ self.upload_files()
+ self.delete_files()
+
+ if self.upload_failures:
+ self.module.fail_json(msg="Some file failed to be uploaded! changed=%s, Files_added [%s]. Files_removed [%s]. Upload_failures [%s]"
+ % (change_required, self.add_files, self.remove_files, self.upload_failures))
+ self.module.exit_json(changed=change_required, files_added=self.add_files, files_removed=self.remove_files)
+
+
+def main():
+ proxy_firmware_upload = NetAppESeriesProxyFirmwareUpload()
+ proxy_firmware_upload.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py
new file mode 100644
index 000000000..b572fe950
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py
@@ -0,0 +1,586 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_systems
+short_description: NetApp E-Series manage SANtricity web services proxy storage arrays
+description:
+ - Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+ add_discovered_systems:
+ description:
+ - This flag will force all discovered storage systems to be added to SANtricity Web Services Proxy.
+ type: bool
+ required: false
+ default: false
+ systems:
+ description:
+ - List of storage system information which defines which systems should be added on SANtricity Web Services Proxy.
+ - Accepts a simple list of serial numbers or a list of dictionaries, each containing at minimum the serial or addresses key from the sub-option list.
+ - Note that the serial number will be used as the storage system identifier when an identifier is not specified.
+ - When I(add_discovered_systems == False), any discovered storage system whose serial number was not supplied will be removed from the proxy.
+ type: list
+ required: False
+ default: []
+ suboptions:
+ ssid:
+ description:
+ - This is the Web Services Proxy's identifier for a storage system.
+ - When ssid is not specified then either the serial or first controller IPv4 address will be used instead.
+ type: str
+ required: false
+ serial:
+ description:
+ - Storage system's serial number which can be located on the top of every NetApp E-Series enclosure.
+ - Include any leading zeros.
+ - Mutually exclusive with the sub-option address.
+ type: str
+ required: false
+ addresses:
+ description:
+ - List of storage system's IPv4 addresses.
+ - Mutually exclusive with the sub-option serial.
+ type: list
+ required: false
+ password:
+ description:
+ - This is the storage system admin password.
+ - When not provided, the top-level I(password) will be used.
+ - The storage system admin password will be set on the device itself with the provided admin password if it is not set.
+ type: str
+ required: false
+ tags:
+ description:
+ - Optional meta tags to associate to the storage system
+ type: dict
+ required: false
+ subnet_mask:
+ description:
+ - This is the IPv4 search range for discovering E-Series storage arrays.
+ - IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
+ - Be sure to include all management paths in the search range.
+ type: str
+ required: false
+ password:
+ description:
+ - Default storage system password which will be used anytime when password has not been provided in the I(systems) sub-options.
+ - The storage system admin password will be set on the device itself with the provided admin password if it is not set.
+ type: str
+ required: false
+ tags:
+ description:
+ - Default meta tags to associate with all storage systems if not otherwise specified in I(systems) sub-options.
+ type: dict
+ required: false
+ accept_certificate:
+ description:
+ - Accept the storage system's certificate automatically even when it is self-signed.
+ - Use M(na_santricity_certificates) to add certificates to SANtricity Web Services Proxy.
+ - SANtricity Web Services Proxy will fail to add any untrusted storage system.
+ type: bool
+ required: false
+ default: true
+"""
+
+EXAMPLES = """
+---
+ - name: Add storage systems to SANtricity Web Services Proxy
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ subnet_mask: 192.168.1.0/24
+ password: password
+ tags:
+ tag: value
+ accept_certificate: True
+ systems:
+ - ssid: "system1"
+ serial: "056233035640"
+ password: "asecretpassword"
+ tags:
+ use: corporate
+ location: sunnyvale
+ - ssid: "system2"
+ addresses:
+ - 192.168.1.100
+ - 192.168.2.100 # The second address is not required; it will be discovered.
+ password: "anothersecretpassword"
+ - serial: "021324673799"
+ - "021637323454"
+ - name: Add storage system to SANtricity Web Services Proxy with serial number list only. The serial numbers will be used to identify each system.
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ subnet_mask: 192.168.1.0/24
+ password: password
+ accept_certificate: True
+ systems:
+ - "1144FG123018"
+ - "721716500123"
+ - "123540006043"
+ - "112123001239"
+ - name: Add all discovered storage systems found in the IP address range 192.168.1.0 to 192.168.1.255 to SANtricity Web Services Proxy.
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ add_discovered_systems: True
+ subnet_mask: 192.168.1.0/24
+ password: password
+ accept_certificate: True
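+ # Illustrative example added for completeness; with no systems supplied and discovery disabled, all existing systems are removed from the proxy.
+ - name: Remove all storage systems from SANtricity Web Services Proxy
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ subnet_mask: 192.168.1.0/24
+ systems: []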
+"""
+RETURN = """
+msg:
+ description: Description of actions performed.
+ type: str
+ returned: always
+ sample: "Storage systems [system1, system2, 1144FG123018, 721716500123, 123540006043, 112123001239] were added."
+"""
+import json
+import threading
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from time import sleep
+
+try:
+ import ipaddress
+except ImportError:
+ HAS_IPADDRESS = False
+else:
+ HAS_IPADDRESS = True
+
+
+class NetAppESeriesProxySystems(NetAppESeriesModule):
+ DEFAULT_CONNECTION_TIMEOUT_SEC = 30
+ DEFAULT_GRAPH_DISCOVERY_TIMEOUT = 30
+ DEFAULT_PASSWORD_STATE_TIMEOUT = 30
+ DEFAULT_DISCOVERY_TIMEOUT_SEC = 300
+
+ def __init__(self):
+ ansible_options = dict(add_discovered_systems=dict(type="bool", required=False, default=False),
+ subnet_mask=dict(type="str", required=False),
+ password=dict(type="str", required=False, default="", no_log=True),
+ tags=dict(type="dict", required=False),
+ accept_certificate=dict(type="bool", required=False, default=True),
+ systems=dict(type="list", required=False, default=[], suboptions=dict(ssid=dict(type="str", required=False),
+ serial=dict(type="str", required=False),
+ addresses=dict(type="list", required=False),
+ password=dict(type="str", required=False, no_log=True),
+ tags=dict(type="dict", required=False))))
+
+ super(NetAppESeriesProxySystems, self).__init__(ansible_options=ansible_options,
+ web_services_version="04.10.0000.0000",
+ supports_check_mode=True,
+ proxy_specific_task=True)
+ args = self.module.params
+ self.add_discovered_systems = args["add_discovered_systems"]
+ self.subnet_mask = args["subnet_mask"]
+ self.accept_certificate = args["accept_certificate"]
+ self.default_password = args["password"]
+
+ self.default_meta_tags = []
+ if "tags" in args and args["tags"]:
+ for key in args["tags"].keys():
+ if isinstance(args["tags"][key], list):
+ self.default_meta_tags.append({"key": key, "valueList": args["tags"][key]})
+ else:
+ self.default_meta_tags.append({"key": key, "valueList": [args["tags"][key]]})
+ self.default_meta_tags = sorted(self.default_meta_tags, key=lambda x: x["key"])
+
+ self.undiscovered_systems = []
+ self.systems_to_remove = []
+ self.systems_to_update = []
+ self.systems_to_add = []
+
+ self.serial_numbers = []
+ self.systems = []
+ if args["systems"]:
+ for system in args["systems"]:
+
+ if isinstance(system, str): # system is a serial number
+ self.serial_numbers.append(system)
+ self.systems.append({"ssid": system,
+ "serial": system,
+ "password": self.default_password,
+ "password_valid": None,
+ "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": self.default_meta_tags,
+ "controller_addresses": [],
+ "embedded_available": None,
+ "accept_certificate": False,
+ "current_info": {},
+ "changes": {},
+ "updated_required": False,
+ "failed": False,
+ "discovered": False})
+ elif isinstance(system, dict): # system is a dictionary of system details
+ if "ssid" not in system:
+ if "serial" in system and system["serial"]:
+ system.update({"ssid": system["serial"]})
+ elif "addresses" in system and system["addresses"]:
+ system.update({"ssid": system["addresses"][0]})
+ if "password" not in system:
+ system.update({"password": self.default_password})
+
+ if "serial" in system and system["serial"]:
+ self.serial_numbers.append(system["serial"])
+
+ # Structure meta tags for Web Services. Copy the default tags so per-system tags do not modify them.
+ meta_tags = list(self.default_meta_tags)
+ if "tags" in system and system["tags"]:
+ for key in system["tags"].keys():
+ if isinstance(system["tags"][key], list):
+ meta_tags.append({"key": key, "valueList": system["tags"][key]})
+ else:
+ meta_tags.append({"key": key, "valueList": [system["tags"][key]]})
+ meta_tags = sorted(meta_tags, key=lambda x: x["key"])
+
+ self.systems.append({"ssid": str(system["ssid"]),
+ "serial": system["serial"] if "serial" in system else "",
+ "password": system["password"],
+ "password_valid": None,
+ "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": meta_tags,
+ "controller_addresses": system["addresses"] if "addresses" in system else [],
+ "embedded_available": None,
+ "accept_certificate": False,
+ "current_info": {},
+ "changes": {},
+ "updated_required": False,
+ "failed": False,
+ "discovered": False})
+ else:
+ self.module.fail_json(msg="Invalid system! All systems must either be a simple serial number or a dictionary. Failed system: %s" % system)
+
+ # Update default request headers
+ self.DEFAULT_HEADERS.update({"x-netapp-password-validate-method": "none"})
+
+ def discover_array(self):
+ """Search for array using the world wide identifier."""
+ subnet = ipaddress.ip_network(u"%s" % self.subnet_mask)
+
+ try:
+ rc, request_id = self.request("discovery", method="POST", data={"startIP": str(subnet[0]), "endIP": str(subnet[-1]),
+ "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC})
+
+ # Wait for discover to complete
+ discovered_systems = None
+ try:
+ for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC):
+ rc, discovered_systems = self.request("discovery?requestId=%s" % request_id["requestId"])
+ if not discovered_systems["discoverProcessRunning"]:
+ break
+ sleep(1)
+ else:
+ self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error))
+
+ if not discovered_systems:
+ self.module.warn("Discovery found no systems. IP starting address [%s]. IP ending address: [%s]." % (str(subnet[0]), str(subnet[-1])))
+ else:
+ # Add all newly discovered systems, ignoring any that were already supplied to prevent duplicates.
+ if self.add_discovered_systems:
+ for discovered_system in discovered_systems["storageSystems"]:
+ if discovered_system["serialNumber"] not in self.serial_numbers:
+ self.systems.append({"ssid": discovered_system["serialNumber"],
+ "serial": discovered_system["serialNumber"],
+ "password": self.default_password,
+ "password_valid": None,
+ "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": self.default_meta_tags,
+ "controller_addresses": [],
+ "embedded_available": None,
+ "accept_certificate": False,
+ "current_info": {},
+ "changes": {},
+ "updated_required": False,
+ "failed": False,
+ "discovered": False})
+
+ # Update controller_addresses
+ for system in self.systems:
+ for discovered_system in discovered_systems["storageSystems"]:
+ if (system["serial"] == discovered_system["serialNumber"] or
+ (system["controller_addresses"] and
+ all([address in discovered_system["ipAddresses"] for address in system["controller_addresses"]]))):
+ system["controller_addresses"] = sorted(discovered_system["ipAddresses"])
+ system["embedded_available"] = "https" in discovered_system["supportedManagementPorts"]
+ system["accept_certificate"] = system["embedded_available"] and self.accept_certificate
+ system["discovered"] = True
+ break
+                    else:  # Track any system that was not discovered so it can be reported later.
+                        self.undiscovered_systems.append(system["ssid"])
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error))
+
+ def update_storage_systems_info(self):
+ """Get current web services proxy storage systems."""
+ try:
+ rc, existing_systems = self.request("storage-systems")
+
+ # Mark systems for adding or removing
+ for system in self.systems:
+ for existing_system in existing_systems:
+ if system["ssid"] == existing_system["id"]:
+ system["current_info"] = existing_system
+
+ if system["current_info"]["passwordStatus"] in ["unknown", "securityLockout"]:
+ system["failed"] = True
+ self.module.warn("Skipping storage system [%s] because of current password status [%s]"
+ % (system["ssid"], system["current_info"]["passwordStatus"]))
+ if system["current_info"]["metaTags"]:
+ system["current_info"]["metaTags"] = sorted(system["current_info"]["metaTags"], key=lambda x: x["key"])
+ break
+ else:
+ self.systems_to_add.append(system)
+
+ # Mark systems for removing
+ for existing_system in existing_systems:
+ for system in self.systems:
+ if existing_system["id"] == system["ssid"]:
+
+ # Leave existing but undiscovered storage systems alone and throw a warning.
+ if existing_system["id"] in self.undiscovered_systems:
+ self.undiscovered_systems.remove(existing_system["id"])
+                            self.module.warn("Expected storage system exists on the proxy but failed to be discovered. Array [%s]." % existing_system["id"])
+ break
+ else:
+ self.systems_to_remove.append(existing_system["id"])
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage systems. Error [%s]." % to_native(error))
+
+ def set_password(self, system):
+ """Determine whether password has been set and, if it hasn't been set, set it."""
+ if system["embedded_available"] and system["controller_addresses"]:
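+            # Probe the first controller address over HTTPS (ports 8443 and 443) and then HTTP (port 8080) until the login check responds.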
+ for url in ["https://%s:8443/devmgr" % system["controller_addresses"][0],
+ "https://%s:443/devmgr" % system["controller_addresses"][0],
+ "http://%s:8080/devmgr" % system["controller_addresses"][0]]:
+ try:
+ rc, response = self._request("%s/utils/login?uid=admin&xsrf=false&onlycheck=true" % url, ignore_errors=True, url_username="admin",
+ url_password="", validate_certs=False)
+
+ if rc == 200: # successful login without password
+ system["password_set"] = False
+ if system["password"]:
+ try:
+ rc, storage_system = self._request("%s/v2/storage-systems/1/passwords" % url, method="POST", url_username="admin",
+ headers=self.DEFAULT_HEADERS, url_password="", validate_certs=False,
+ data=json.dumps({"currentAdminPassword": "", "adminPassword": True,
+ "newPassword": system["password"]}))
+
+ except Exception as error:
+ system["failed"] = True
+ self.module.warn("Failed to set storage system password. Array [%s]." % system["ssid"])
+ break
+
+ elif rc == 401: # unauthorized
+ system["password_set"] = True
+ break
+ except Exception as error:
+ pass
+ else:
+ self.module.warn("Failed to retrieve array password state. Array [%s]." % system["ssid"])
+ system["failed"] = True
+
+ def update_system_changes(self, system):
+        """Determine whether storage system configuration changes are required."""
+ if system["current_info"]:
+ system["changes"] = dict()
+
+ # Check if management paths should be updated
+ if (sorted(system["controller_addresses"]) != sorted(system["current_info"]["managementPaths"]) or
+ system["current_info"]["ip1"] not in system["current_info"]["managementPaths"] or
+ system["current_info"]["ip2"] not in system["current_info"]["managementPaths"]):
+ system["changes"].update({"controllerAddresses": system["controller_addresses"]})
+
+ # Check for expected meta tag count
+ if len(system["meta_tags"]) != len(system["current_info"]["metaTags"]):
+ if len(system["meta_tags"]) == 0:
+ system["changes"].update({"removeAllTags": True})
+ else:
+ system["changes"].update({"metaTags": system["meta_tags"]})
+
+ # Check for expected meta tag key-values
+ else:
+ for index in range(len(system["meta_tags"])):
+ if (system["current_info"]["metaTags"][index]["key"] != system["meta_tags"][index]["key"] or
+ sorted(system["current_info"]["metaTags"][index]["valueList"]) != sorted(system["meta_tags"][index]["valueList"])):
+ system["changes"].update({"metaTags": system["meta_tags"]})
+ break
+
+ # Check whether CA certificate should be accepted
+ if system["accept_certificate"] and not all([controller["certificateStatus"] == "trusted" for controller in system["current_info"]["controllers"]]):
+ system["changes"].update({"acceptCertificate": True})
+
+            if system["ssid"] not in self.undiscovered_systems and system["changes"]:
+ self.systems_to_update.append(system)
+
+ def add_system(self, system):
+ """Add basic storage system definition to the web services proxy."""
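+        # Make sure the array's admin password has been set before the system is registered with the proxy.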
+ self.set_password(system)
+
+ body = {"id": system["ssid"],
+ "controllerAddresses": system["controller_addresses"],
+ "password": system["password"]}
+ if system["accept_certificate"]: # Set only if embedded is available and accept_certificates==True
+ body.update({"acceptCertificate": system["accept_certificate"]})
+ if system["meta_tags"]:
+ body.update({"metaTags": system["meta_tags"]})
+
+ try:
+ rc, storage_system = self.request("storage-systems", method="POST", data=body)
+ except Exception as error:
+ self.module.warn("Failed to add storage system. Array [%s]. Error [%s]" % (system["ssid"], to_native(error)))
+ return # Skip the password validation.
+
+        # Ensure the password is validated
+        last_error = None
+        for retries in range(5):
+            sleep(1)
+            try:
+                rc, storage_system = self.request("storage-systems/%s/validatePassword" % system["ssid"], method="POST")
+                break
+            except Exception as error:
+                last_error = error
+                continue
+        else:
+            self.module.warn("Failed to validate password status. Array [%s]. Error [%s]" % (system["ssid"], to_native(last_error)))
+
+ def update_system(self, system):
+ """Update storage system configuration."""
+ try:
+ rc, storage_system = self.request("storage-systems/%s" % system["ssid"], method="POST", data=system["changes"])
+ except Exception as error:
+ self.module.warn("Failed to update storage system. Array [%s]. Error [%s]" % (system["ssid"], to_native(error)))
+
+ def remove_system(self, ssid):
+ """Remove storage system."""
+ try:
+ rc, storage_system = self.request("storage-systems/%s" % ssid, method="DELETE")
+ except Exception as error:
+ self.module.warn("Failed to remove storage system. Array [%s]. Error [%s]." % (ssid, to_native(error)))
+
+ def apply(self):
+ """Determine whether changes are required and, if necessary, apply them."""
+ missing_packages = []
+ if not HAS_IPADDRESS:
+ missing_packages.append("ipaddress")
+
+ if missing_packages:
+ self.module.fail_json(msg="Python packages are missing! Packages [%s]." % ", ".join(missing_packages))
+
+ if self.is_embedded():
+            self.module.fail_json(msg="Cannot add/remove storage systems from a SANtricity Web Services Embedded instance.")
+
+ if self.add_discovered_systems or self.systems:
+ if self.subnet_mask:
+ self.discover_array()
+ self.update_storage_systems_info()
+
+ # Determine whether the storage system requires updating
+ thread_pool = []
+ for system in self.systems:
+ if not system["failed"]:
+ thread = threading.Thread(target=self.update_system_changes, args=(system,))
+ thread_pool.append(thread)
+ thread.start()
+ for thread in thread_pool:
+ thread.join()
+ else:
+ self.update_storage_systems_info()
+
+ changes_required = False
+ if self.systems_to_add or self.systems_to_update or self.systems_to_remove:
+ changes_required = True
+
+ if changes_required and not self.module.check_mode:
+ add_msg = ""
+ update_msg = ""
+ remove_msg = ""
+
+ # Remove storage systems
+ if self.systems_to_remove:
+ ssids = []
+ thread_pool = []
+ for ssid in self.systems_to_remove:
+ thread = threading.Thread(target=self.remove_system, args=(ssid,))
+ thread_pool.append(thread)
+ thread.start()
+ ssids.append(ssid)
+ for thread in thread_pool:
+ thread.join()
+ if ssids:
+ remove_msg = "system%s removed: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))
+
+ thread_pool = []
+
+ # Add storage systems
+ if self.systems_to_add:
+ ssids = []
+ for system in self.systems_to_add:
+ if not system["failed"]:
+ thread = threading.Thread(target=self.add_system, args=(system,))
+ thread_pool.append(thread)
+ thread.start()
+ ssids.append(system["ssid"])
+ if ssids:
+ add_msg = "system%s added: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))
+
+ # Update storage systems
+ if self.systems_to_update:
+ ssids = []
+ for system in self.systems_to_update:
+ if not system["failed"]:
+ thread = threading.Thread(target=self.update_system, args=(system,))
+ thread_pool.append(thread)
+ thread.start()
+ ssids.append(system["ssid"])
+ if ssids:
+ update_msg = "system%s updated: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))
+
+ # Wait for storage systems to be added or updated
+ for thread in thread_pool:
+ thread.join()
+
+ # Report module actions
+ if self.undiscovered_systems:
+                undiscovered_msg = "system%s undiscovered: %s" % ("s" if len(self.undiscovered_systems) > 1 else "", ", ".join(self.undiscovered_systems))
+ self.module.fail_json(msg=(", ".join([msg for msg in [add_msg, update_msg, remove_msg, undiscovered_msg] if msg])), changed=changes_required)
+
+ self.module.exit_json(msg=", ".join([msg for msg in [add_msg, update_msg, remove_msg] if msg]), changed=changes_required)
+
+ # Report no changes
+ if self.undiscovered_systems:
+            self.module.fail_json(msg="No changes were made; however, the following system(s) failed to be discovered: %s."
+ % self.undiscovered_systems, changed=changes_required)
+ self.module.exit_json(msg="No changes were made.", changed=changes_required)
+
+
+def main():
+ proxy_systems = NetAppESeriesProxySystems()
+ proxy_systems.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py
new file mode 100644
index 000000000..909819ce2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_santricity_server_certificate
+short_description: NetApp E-Series manage the storage system's server SSL certificates.
+description: Manage NetApp E-Series storage system's server SSL certificates.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are represented alphabetically, with the first controller as A, the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard limitation and could change in the future.
+      - I(controller) must be specified unless managing SANtricity Web Services Proxy (i.e. I(ssid="proxy")).
+ choices:
+ - A
+ - B
+ type: str
+ required: false
+ certificates:
+ description:
+ - Unordered list of all server certificate files which include PEM and DER encoded certificates as well as private keys.
+ - When I(certificates) is not defined then a self-signed certificate will be expected.
+ type: list
+ required: false
+ passphrase:
+ description:
+ - Passphrase for PEM encoded private key encryption.
+      - If I(passphrase) is not supplied then Ansible will prompt for the private key passphrase.
+ type: str
+ required: false
+notes:
+ - Set I(ssid=='0') or I(ssid=='proxy') to specifically reference SANtricity Web Services Proxy.
+ - Certificates can be the following filetypes - PEM (.pem, .crt, .cer, or .key) or DER (.der or .cer)
+ - When I(certificates) is not defined then a self-signed certificate will be expected.
+requirements:
+ - cryptography
+"""
+EXAMPLES = """
+- name: Ensure signed certificate is installed.
+ na_santricity_server_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ controller: A
+ certificates:
+ - 'root_auth_cert.pem'
+ - 'intermediate_auth1_cert.pem'
+ - 'intermediate_auth2_cert.pem'
+ - 'public_cert.pem'
+ - 'private_key.pem'
+ passphrase: keypass
+- name: Ensure signed certificate bundle is installed.
+ na_santricity_server_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ controller: B
+ certificates:
+ - 'cert_bundle.pem'
+ passphrase: keypass
+- name: Ensure storage system generated self-signed certificate is installed.
+ na_santricity_server_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ controller: A
+"""
+RETURN = """
+changed:
+ description: Whether changes have been made.
+ type: bool
+ returned: always
+ sample: true
+signed_server_certificate:
+ description: Whether the public server certificate is signed.
+ type: bool
+ returned: always
+ sample: true
+added_certificates:
+ description: Any SSL certificates that were added.
+ type: list
+ returned: always
+  sample: ['added_certificate.crt']
+removed_certificates:
+ description: Any SSL certificates that were removed.
+ type: list
+ returned: always
+  sample: ['removed_certificate.crt']
+"""
+
+import binascii
+import random
+import re
+
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from time import sleep
+
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.backends import default_backend
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+else:
+ HAS_CRYPTOGRAPHY = True
+
+
+def create_multipart_formdata(file_details):
+ """Create the data for a multipart/form request for a certificate."""
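+    # The multipart body is assembled by hand so the payload can be built as str on Python 2 and bytes on Python 3.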
+ boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(30)])
+ data_parts = list()
+ data = None
+
+ if six.PY2: # Generate payload for Python 2
+ newline = "\r\n"
+ for name, filename, content in file_details:
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+ "Content-Type: application/octet-stream",
+ "",
+ content])
+ data_parts.extend(["--%s--" % boundary, ""])
+ data = newline.join(data_parts)
+
+ else:
+ newline = six.b("\r\n")
+ for name, filename, content in file_details:
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+ six.b("Content-Type: application/octet-stream"),
+ six.b(""),
+ content])
+ data_parts.extend([six.b("--%s--" % boundary), b""])
+ data = newline.join(data_parts)
+
+ headers = {
+ "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+ "Content-Length": str(len(data))}
+
+ return headers, data
+
+
+class NetAppESeriesServerCertificate(NetAppESeriesModule):
+ RESET_SSL_CONFIG_TIMEOUT_SEC = 3 * 60
+
+ def __init__(self):
+ ansible_options = dict(controller=dict(type="str", required=False, choices=["A", "B"]),
+ certificates=dict(type="list", required=False),
+ passphrase=dict(type="str", required=False, no_log=True))
+
+ super(NetAppESeriesServerCertificate, self).__init__(ansible_options=ansible_options,
+ web_services_version="05.00.0000.0000",
+ supports_check_mode=True)
+ args = self.module.params
+ self.controller = args["controller"]
+ self.certificates = args["certificates"] if "certificates" in args.keys() else list()
+ self.passphrase = args["passphrase"] if "passphrase" in args.keys() else None
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ self.url_path_suffix = ""
+ if self.is_proxy():
+ if self.ssid.lower() in ["0", "proxy"]:
+ self.url_path_suffix = "?controller=auto"
+ elif self.controller is not None:
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+ self.url_path_suffix = "?controller=%s" % self.controller.lower()
+ else:
+ self.module.fail_json(msg="Invalid options! You must specify which controller's certificates to modify. Array [%s]." % self.ssid)
+ elif self.controller is None:
+ self.module.fail_json(msg="Invalid options! You must specify which controller's certificates to modify. Array [%s]." % self.ssid)
+
+ self.cache_get_current_certificates = None
+ self.cache_is_controller_alternate = None
+ self.cache_is_public_server_certificate_signed = None
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their controller slot."""
+ controllers_dict = {}
+ controllers = []
+ try:
+ rc, controllers = self.request("storage-systems/%s/controllers" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ for controller in controllers:
+ slot = controller['physicalLocation']['slot']
+ letter = chr(slot + 64)
+ controllers_dict.update({letter: slot})
+
+ return controllers_dict
+
+ def check_controller(self):
+        """Determine whether the affected controller is the alternate controller."""
+ controllers_info = self.get_controllers()
+ try:
+ rc, about = self.request("utils/about", rest_api_path=self.DEFAULT_BASE_PATH)
+ self.url_path_suffix = "?alternate=%s" % ("true" if controllers_info[self.controller] != about["controllerPosition"] else "false")
+ except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve the accessing controller's slot information. Array [%s]." % self.ssid)
+
+ @staticmethod
+ def sanitize_distinguished_name(dn):
+ """Generate a sorted distinguished name string to account for different formats/orders."""
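+        # Normalize spacing around "=", lowercase the string, split on commas that begin a new attribute, and sort the parts so equivalent DNs compare equal.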
+ dn = re.sub(" *= *", "=", dn).lower()
+ dn = re.sub(", *(?=[a-zA-Z]+={1})", "---SPLIT_MARK---", dn)
+ dn_parts = dn.split("---SPLIT_MARK---")
+ dn_parts.sort()
+ return ",".join(dn_parts)
+
+ def certificate_info_from_file(self, path):
+ """Determine the certificate info from the provided filepath."""
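+        # The file is parsed as PEM first; on failure it is retried as a DER encoded certificate and finally as a DER encoded private key.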
+ certificates_info = {}
+ try:
+ # Treat file as PEM encoded file.
+ with open(path, "r") as fh:
+ line = fh.readline()
+ while line != "":
+
+                # Add public certificates to certificates_info.
+ if re.search("^-+BEGIN CERTIFICATE-+$", line):
+ certificate = line
+ line = fh.readline()
+ while not re.search("^-+END CERTIFICATE-+$", line):
+ if line == "":
+ self.module.fail_json(msg="Invalid certificate! Path [%s]. Array [%s]." % (path, self.ssid))
+ certificate += line
+ line = fh.readline()
+ certificate += line
+ if not six.PY2:
+ certificate = six.b(certificate)
+ info = x509.load_pem_x509_certificate(certificate, default_backend())
+ certificates_info.update(self.certificate_info(info, certificate, path))
+
+                # Add the private key to certificates_info.
+ elif re.search("^-+BEGIN.*PRIVATE KEY-+$", line):
+ pkcs8 = "BEGIN PRIVATE KEY" in line
+ pkcs8_encrypted = "BEGIN ENCRYPTED PRIVATE KEY" in line
+ key = line
+ line = fh.readline()
+ while not re.search("^-+END.*PRIVATE KEY-+$", line):
+ if line == "":
+ self.module.fail_json(msg="Invalid certificate! Array [%s]." % self.ssid)
+ key += line
+ line = fh.readline()
+ key += line
+ if not six.PY2:
+ key = six.b(key)
+ if self.passphrase:
+ self.passphrase = six.b(self.passphrase)
+
+ # Check for PKCS8 PEM encoding.
+ if pkcs8 or pkcs8_encrypted:
+ try:
+ if pkcs8:
+ crypto_key = serialization.load_pem_private_key(key, password=None, backend=default_backend())
+ else:
+ crypto_key = serialization.load_pem_private_key(key, password=self.passphrase, backend=default_backend())
+ except ValueError as error:
+ self.module.fail_json(msg="Failed to load%sPKCS8 encoded private key. %s"
+ " Error [%s]." % (" encrypted " if pkcs8_encrypted else " ",
+ "Check passphrase." if pkcs8_encrypted else "", error))
+
+ key = crypto_key.private_bytes(encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption())
+
+ # Check whether multiple private keys have been provided and fail if different
+ if "private_key" in certificates_info.keys() and certificates_info["private_key"] != key:
+ self.module.fail_json(msg="Multiple private keys have been provided! Array [%s]" % self.ssid)
+ else:
+ certificates_info.update({"private_key": key})
+
+ line = fh.readline()
+
+ # Throw exception when no PEM certificates have been discovered.
+ if len(certificates_info) == 0:
+ raise Exception("Failed to discover a valid PEM encoded certificate or private key!")
+
+ except Exception as error:
+ # Treat file as DER encoded certificate
+ try:
+ with open(path, "rb") as fh:
+ cert_info = x509.load_der_x509_certificate(fh.read(), default_backend())
+ cert_data = cert_info.public_bytes(serialization.Encoding.PEM)
+ certificates_info.update(self.certificate_info(cert_info, cert_data, path))
+
+ # Throw exception when no DER encoded certificates have been discovered.
+ if len(certificates_info) == 0:
+ raise Exception("Failed to discover a valid DER encoded certificate!")
+ except Exception as error:
+
+ # Treat file as DER encoded private key
+ try:
+ with open(path, "rb") as fh:
+                    crypto_key = serialization.load_der_private_key(fh.read(), password=self.passphrase, backend=default_backend())
+ key = crypto_key.private_bytes(encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption())
+ certificates_info.update({"private_key": key})
+ except Exception as error:
+                    self.module.fail_json(msg="Invalid file type! File is neither a PEM nor a DER encoded certificate/private key."
+ " Path [%s]. Array [%s]. Error [%s]." % (path, self.ssid, to_native(error)))
+
+ return certificates_info
+
+ def certificate_info(self, info, data, path):
+ """Load x509 certificate that is either encoded DER or PEM encoding and return the certificate fingerprint."""
+ fingerprint = binascii.hexlify(info.fingerprint(info.signature_hash_algorithm)).decode("utf-8")
+ return {self.sanitize_distinguished_name(info.subject.rfc4514_string()): {"alias": fingerprint, "fingerprint": fingerprint,
+ "certificate": data, "path": path,
+ "issuer": self.sanitize_distinguished_name(info.issuer.rfc4514_string())}}
+
+ def get_current_certificates(self):
+ """Determine the server certificates that exist on the storage system."""
+ if self.cache_get_current_certificates is None:
+ current_certificates = []
+ try:
+ rc, current_certificates = self.request(self.url_path_prefix + "certificates/server%s" % self.url_path_suffix)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve server certificates. Array [%s]." % self.ssid)
+
+ self.cache_get_current_certificates = {}
+ for certificate in current_certificates:
+ certificate.update({"issuer": self.sanitize_distinguished_name(certificate["issuerDN"])})
+ self.cache_get_current_certificates.update({self.sanitize_distinguished_name(certificate["subjectDN"]): certificate})
+
+ return self.cache_get_current_certificates
+
+ def is_public_server_certificate_signed(self):
+ """Return whether the public server certificate is signed."""
+ if self.cache_is_public_server_certificate_signed is None:
+ current_certificates = self.get_current_certificates()
+
+ for certificate in current_certificates:
+ if current_certificates[certificate]["alias"] == "jetty":
+ self.cache_is_public_server_certificate_signed = current_certificates[certificate]["type"] == "caSigned"
+ break
+
+ return self.cache_is_public_server_certificate_signed
+
+ def get_expected_certificates(self):
+        """Determine expected certificates and return the certificate list in the required submission order."""
+ certificates_info = {}
+ existing_certificates = self.get_current_certificates()
+
+ private_key = None
+ if self.certificates:
+ for path in self.certificates:
+ info = self.certificate_info_from_file(path)
+ if "private_key" in info.keys():
+ if private_key is not None and info["private_key"] != private_key:
+ self.module.fail_json(msg="Multiple private keys have been provided! Array [%s]" % self.ssid)
+ else:
+ private_key = info.pop("private_key")
+ certificates_info.update(info)
+
+ # Determine bundle certificate ordering.
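+        # A certificate that does not issue any other known certificate must end the chain, so it is placed last; its issuers are then placed ahead of it, with any remaining root certificate first.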
+ ordered_certificates_info = [dict] * len(certificates_info)
+ ordered_certificates_info_index = len(certificates_info) - 1
+ while certificates_info:
+ for certificate_subject in certificates_info.keys():
+
+ # Determine all remaining issuers.
+ remaining_issuer_list = [info["issuer"] for subject, info in existing_certificates.items()]
+ for subject, info in certificates_info.items():
+ remaining_issuer_list.append(info["issuer"])
+
+ # Search for the next certificate that is not an issuer of the remaining certificates in certificates_info dictionary.
+ if certificate_subject not in remaining_issuer_list:
+ ordered_certificates_info[ordered_certificates_info_index] = certificates_info[certificate_subject]
+ certificates_info.pop(certificate_subject)
+ ordered_certificates_info_index -= 1
+ break
+ else: # Add remaining root certificate if one exists.
+ for certificate_subject in certificates_info.keys():
+ ordered_certificates_info[ordered_certificates_info_index] = certificates_info[certificate_subject]
+ ordered_certificates_info_index -= 1
+ break
+ return {"private_key": private_key, "certificates": ordered_certificates_info}
+
+ def determine_changes(self):
+ """Determine certificates that need to be added or removed from storage system's server certificates database."""
+ if not self.is_proxy():
+ self.check_controller()
+ existing_certificates = self.get_current_certificates()
+ expected = self.get_expected_certificates()
+ certificates = expected["certificates"]
+
+ changes = {"change_required": False,
+ "signed_cert": True if certificates else False,
+ "private_key": expected["private_key"],
+ "public_cert": None,
+ "add_certs": [],
+ "remove_certs": []}
+
+ # Determine whether any expected certificates are missing from the storage system's database.
+ if certificates:
+
+            # Create an initial remove_certs list containing every existing certificate alias.
+ for existing_certificate_subject, existing_certificate in existing_certificates.items():
+ changes["remove_certs"].append(existing_certificate["alias"])
+
+ # Determine expected certificates
+ last_certificate_index = len(certificates) - 1
+ for certificate_index, certificate in enumerate(certificates):
+ for existing_certificate_subject, existing_certificate in existing_certificates.items():
+
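+                    # The last certificate in the ordered list is the public server certificate, which Web Services stores under the "jetty" alias.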
+ if certificate_index == last_certificate_index:
+ if existing_certificate["alias"] == "jetty":
+ if (certificate["fingerprint"] != existing_certificate["shaFingerprint"] and
+ certificate["fingerprint"] != existing_certificate["sha256Fingerprint"]):
+ changes["change_required"] = True
+ changes["public_cert"] = certificate
+ changes["remove_certs"].remove(existing_certificate["alias"])
+ break
+
+ elif certificate["alias"] == existing_certificate["alias"]:
+ if (certificate["fingerprint"] != existing_certificate["shaFingerprint"] and
+ certificate["fingerprint"] != existing_certificate["sha256Fingerprint"]):
+ changes["add_certs"].append(certificate)
+ changes["change_required"] = True
+ changes["remove_certs"].remove(existing_certificate["alias"])
+ break
+
+ else:
+ changes["add_certs"].append(certificate)
+ changes["change_required"] = True
+
+        # Determine whether a new self-signed certificate needs to be generated.
+ elif self.is_public_server_certificate_signed():
+ changes["change_required"] = True
+
+ return changes
+
+ def apply_self_signed_certificate(self):
+ """Install self-signed server certificate which is generated by the storage system itself."""
+ try:
+ rc, resp = self.request(self.url_path_prefix + "certificates/reset%s" % self.url_path_suffix, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to reset SSL configuration back to a self-signed certificate! Array [%s]. Error [%s]." % (self.ssid, error))
+
+ def apply_signed_certificate(self, public_cert, private_key):
+ """Install authoritative signed server certificate whether csr is generated by storage system or not."""
+ if private_key is None:
+ headers, data = create_multipart_formdata([("file", "signed_server_certificate", public_cert["certificate"])])
+ else:
+ headers, data = create_multipart_formdata([("file", "signed_server_certificate", public_cert["certificate"]),
+ ("privateKey", "private_key", private_key)])
+
+ try:
+ rc, resp = self.request(self.url_path_prefix + "certificates/server%s&replaceMainServerCertificate=true" % self.url_path_suffix,
+ method="POST", headers=headers, data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload signed server certificate! Array [%s]. Error [%s]." % (self.ssid, error))
+
+ def upload_authoritative_certificates(self, certificate):
+        """Upload an authoritative (CA) certificate."""
+ headers, data = create_multipart_formdata([["file", certificate["alias"], certificate["certificate"]]])
+
+ try:
+ rc, resp = self.request(self.url_path_prefix + "certificates/server%s&alias=%s" % (self.url_path_suffix, certificate["alias"]),
+ method="POST", headers=headers, data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload certificate authority! Array [%s]. Error [%s]." % (self.ssid, error))
+
+ def remove_authoritative_certificates(self, alias):
+        """Delete an authoritative (CA) certificate by alias."""
+ try:
+ rc, resp = self.request(self.url_path_prefix + "certificates/server/%s%s" % (alias, self.url_path_suffix), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete certificate authority! Array [%s]. Error [%s]." % (self.ssid, error))
+
+ def reload_ssl_configuration(self):
+ """Asynchronously reloads the SSL configuration."""
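+        # Fire the reload with errors ignored (the connection drops while SSL restarts), then poll until the API responds again or the timeout expires.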
+ self.request(self.url_path_prefix + "certificates/reload%s" % self.url_path_suffix, method="POST", ignore_errors=True)
+
+ for retry in range(int(self.RESET_SSL_CONFIG_TIMEOUT_SEC / 3)):
+ try:
+ rc, current_certificates = self.request(self.url_path_prefix + "certificates/server%s" % self.url_path_suffix)
+ except Exception as error:
+ sleep(3)
+ continue
+ break
+ else:
+ self.module.fail_json(msg="Failed to retrieve server certificates. Array [%s]." % self.ssid)
+
+ def apply(self):
+        """Apply state changes to the storage system's server certificates."""
+ if not HAS_CRYPTOGRAPHY:
+            self.module.fail_json(msg="The Python cryptography package is missing!")
+
+ major, minor, patch = [int(item) for item in str(cryptography.__version__).split(".")]
+ if major < 2 or (major == 2 and minor < 5):
+            self.module.fail_json(msg="The Python cryptography package version must be 2.5 or greater! Version [%s]." % cryptography.__version__)
+
+ changes = self.determine_changes()
+ if changes["change_required"] and not self.module.check_mode:
+
+ if changes["signed_cert"]:
+ for certificate in changes["add_certs"]:
+ self.upload_authoritative_certificates(certificate)
+ for certificate_alias in changes["remove_certs"]:
+ self.remove_authoritative_certificates(certificate_alias)
+ if changes["public_cert"]:
+ self.apply_signed_certificate(changes["public_cert"], changes["private_key"])
+ self.reload_ssl_configuration()
+ else:
+ self.apply_self_signed_certificate()
+ self.reload_ssl_configuration()
+
+ self.module.exit_json(changed=changes["change_required"],
+ signed_server_certificate=changes["signed_cert"],
+ added_certificates=[cert["alias"] for cert in changes["add_certs"]],
+ removed_certificates=changes["remove_certs"])
+
+
+def main():
+ client_certs = NetAppESeriesServerCertificate()
+ client_certs.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py
new file mode 100644
index 000000000..67356c9dc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py
@@ -0,0 +1,1578 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: na_santricity_snapshot
+short_description: NetApp E-Series manage the storage system's snapshots.
+description: Manage the NetApp E-Series storage system's snapshots.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - When I(state==absent) ensures the I(type) has been removed.
+ - When I(state==present) ensures the I(type) is available.
+ - When I(state==rollback) the consistency group will be rolled back to the point-in-time snapshot images selected by I(pit_name or pit_timestamp).
+ - I(state==rollback) will always return changed since it is not possible to evaluate the current state of the base volume in relation to a snapshot image.
+ type: str
+ choices:
+ - absent
+ - present
+ - rollback
+ default: present
+ required: false
+ type:
+ description:
+ - Type of snapshot object to effect.
+ - Group indicates a snapshot consistency group; consistency groups may have one or more base volume members which are defined in I(volumes).
+ - Pit indicates a snapshot consistency group point-in-time image(s); a snapshot image will be taken of each base volume when I(state==present).
+ - Warning! When I(state==absent and type==pit), I(pit_name) or I(pit_timestamp) must be defined and all point-in-time images created prior to the
+ selection will also be deleted.
+ - View indicates a consistency group snapshot volume of particular point-in-time image(s); snapshot volumes will be created for each base volume member.
+ - Views are created from images from a single point-in-time so once created they cannot be modified.
+ type: str
+ default: group
+ choices:
+ - group
+ - pit
+ - view
+ required: false
+ group_name:
+ description:
+ - Name of the snapshot consistency group or snapshot volume.
+ - Be sure to use different names for snapshot consistency groups and snapshot volumes to avoid name conflicts.
+ type: str
+ required: true
+ volumes:
+ description:
+ - Details for each consistency group base volume for defining reserve capacity, preferred reserve capacity storage pool, and snapshot volume options.
+ - When I(state==present and type==group) the volume entries will be used to add or remove base volume from a snapshot consistency group.
+ - When I(state==present and type==view) the volume entries will be used to select images from a point-in-time for their respective snapshot volumes.
+      - If I(state==present and type==view) and I(volumes) is not specified then all volumes will be selected with the defaults.
+ - Views are created from images from a single point-in-time so once created they cannot be modified.
+ - When I(state==rollback) then I(volumes) can be used to specify which base volumes to rollback; otherwise all consistency group volumes will rollback.
+ type: list
+ required: false
+ suboptions:
+ volume:
+ description:
+ - Base volume for consistency group.
+ type: str
+ required: true
+ reserve_capacity_pct:
+ description:
+ - Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW).
+ - Used to define reserve capacity for both snapshot consistency group volume members and snapshot volumes.
+ type: int
+ default: 40
+ required: false
+ preferred_reserve_storage_pool:
+ description:
+ - Preferred storage pool or volume group for the reserve capacity volume.
+ - The base volume's storage pool or volume group will be selected by default if not defined.
+ - Used to specify storage pool or volume group for both snapshot consistency group volume members and snapshot volumes
+ type: str
+ required: false
+ snapshot_volume_writable:
+ description:
+ - Whether snapshot volume of base volume images should be writable.
+ type: bool
+ default: true
+ required: false
+ snapshot_volume_validate:
+ description:
+ - Whether snapshot volume should be validated which includes both a media scan and parity validation.
+ type: bool
+ default: false
+ required: false
+ snapshot_volume_host:
+ description:
+ - Host or host group to map snapshot volume.
+ type: str
+ required: false
+ maximum_snapshots:
+ description:
+ - Total number of snapshot images to maintain.
+ type: int
+ default: 32
+ required: false
+ reserve_capacity_pct:
+ description:
+ - Default percentage of base volume capacity to reserve for snapshot copy-on-writes (COW).
+ - Used to define reserve capacity for both snapshot consistency group volume members and snapshot volumes.
+ type: int
+ default: 40
+ required: false
+ preferred_reserve_storage_pool:
+ description:
+ - Default preferred storage pool or volume group for the reserve capacity volume.
+ - The base volume's storage pool or volume group will be selected by default if not defined.
+ - Used to specify storage pool or volume group for both snapshot consistency group volume members and snapshot volumes
+ type: str
+ required: false
+ alert_threshold_pct:
+ description:
+ - Percent of filled reserve capacity to issue alert.
+ type: int
+ default: 75
+ required: false
+ reserve_capacity_full_policy:
+ description:
+ - Policy for full reserve capacity.
+ - Purge deletes the oldest snapshot image for the base volume in the consistency group.
+ - Reject writes to base volume (keep snapshot images valid).
+ choices:
+ - purge
+ - reject
+ type: str
+ default: purge
+ required: false
+ rollback_priority:
+ description:
+ - Storage system priority given to restoring snapshot point in time.
+ type: str
+ choices:
+ - highest
+ - high
+ - medium
+ - low
+ - lowest
+ default: medium
+ required: false
+ rollback_backup:
+ description:
+ - Whether a point-in-time snapshot should be taken prior to performing a rollback.
+ type: bool
+ default: true
+ required: false
+ pit_name:
+ description:
+ - Name of a consistency group's snapshot images.
+ type: str
+ required: false
+ pit_description:
+ description:
+ - Arbitrary description for a consistency group's snapshot images
+ type: str
+ required: false
+ pit_timestamp:
+ description:
+ - Snapshot image timestamp in the YYYY-MM-DD HH:MM:SS (AM|PM) (hours, minutes, seconds, and day-period are optional)
+ - Define only as much time as necessary to distinguish the desired snapshot image from the others.
+ - 24 hour time will be assumed if day-period indicator (AM, PM) is not specified.
+      - The terms newest and oldest may be used to select the newest and oldest consistency group images.
+ - Mutually exclusive with I(pit_name or pit_description)
+ type: str
+ required: false
+ view_name:
+ description:
+ - Consistency group snapshot volume group.
+ - Required when I(state==volume) or when ensuring the views absence when I(state==absent).
+ type: str
+ required: false
+ view_host:
+ description:
+ - Default host or host group to map snapshot volumes.
+ type: str
+ required: false
+ view_writable:
+ description:
+ - Default whether snapshot volumes should be writable.
+ type: bool
+ default: true
+ required: false
+ view_validate:
+ description:
+      - Default whether snapshot volumes should be validated.
+ type: bool
+ default: false
+ required: false
+notes:
+  - Key-value pairs are used to keep track of snapshot names and descriptions since the snapshot point-in-time images do not have metadata associated with their
+ data structures; therefore, it is necessary to clean out old keys that are no longer associated with an actual image. This cleaning action is performed each
+ time this module is executed.
+"""
+EXAMPLES = """
+- name: Ensure snapshot consistency group exists.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ state: present
+ type: group
+ group_name: snapshot_group1
+ volumes:
+ - volume: vol1
+ reserve_capacity_pct: 20
+ preferred_reserve_storage_pool: vg1
+ - volume: vol2
+ reserve_capacity_pct: 30
+ - volume: vol3
+ alert_threshold_pct: 80
+ maximum_snapshots: 30
+- name: Take the current consistency group's base volumes point-in-time snapshot images.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ state: present
+ type: pit
+ group_name: snapshot_group1
+ pit_name: pit1
+ pit_description: Initial consistency group's point-in-time snapshot images.
+- name: Ensure snapshot consistency group view exists and is mapped to host group.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ state: present
+ type: view
+ group_name: snapshot_group1
+ pit_name: pit1
+ view_name: view1
+ view_host: view1_hosts_group
+ volumes:
+ - volume: vol1
+ reserve_capacity_pct: 20
+ preferred_reserve_storage_pool: vg4
+ snapshot_volume_writable: false
+ snapshot_volume_validate: true
+ - volume: vol2
+ reserve_capacity_pct: 20
+ preferred_reserve_storage_pool: vg4
+ snapshot_volume_writable: true
+ snapshot_volume_validate: true
+ - volume: vol3
+ reserve_capacity_pct: 20
+ preferred_reserve_storage_pool: vg4
+ snapshot_volume_writable: false
+ snapshot_volume_validate: true
+ alert_threshold_pct: 80
+ maximum_snapshots: 30
+- name: Rollback base volumes to consistency group's point-in-time pit1.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+    state: rollback
+    type: group
+    group_name: snapshot_group1
+    pit_name: pit1
+    rollback_priority: high
+- name: Ensure snapshot consistency group view no longer exists.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ state: absent
+ type: view
+ group_name: snapshot_group1
+ view_name: view1
+- name: Ensure that the consistency group's base volumes point-in-time snapshot images pit1 no longer exists.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ state: absent
+    type: pit
+ group_name: snapshot_group1
+ pit_name: pit1
+- name: Ensure snapshot consistency group no longer exists.
+ na_santricity_snapshot:
+ ssid: "1"
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ state: absent
+ type: group
+ group_name: snapshot_group1
+"""
+RETURN = """
+changed:
+ description: Whether changes have been made.
+ type: bool
+ returned: always
+group_changes:
+ description: All changes performed to the consistency group.
+ type: dict
+ returned: always
+deleted_metadata_keys:
+ description: Keys that were purged from the key-value datastore.
+ type: list
+ returned: always
+"""
+from datetime import datetime
+import re
+from time import sleep
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
+class NetAppESeriesSnapshot(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present", "rollback"], required=False),
+ type=dict(type="str", default="group", choices=["group", "pit", "view"], required=False),
+ group_name=dict(type="str", required=True),
+ volumes=dict(type="list", required=False,
+ suboptions=dict(volume=dict(type="str", required=True),
+ reserve_capacity_pct=dict(type="int", default=40, required=False),
+ preferred_reserve_storage_pool=dict(type="str", required=False),
+ snapshot_volume_writable=dict(type="bool", default=True, required=False),
+ snapshot_volume_validate=dict(type="bool", default=False, required=False),
+ snapshot_volume_host=dict(type="str", default=None, required=False),
+ snapshot_volume_lun=dict(type="int", default=None, required=False))),
+ maximum_snapshots=dict(type="int", default=32, required=False),
+ reserve_capacity_pct=dict(type="int", default=40, required=False),
+ preferred_reserve_storage_pool=dict(type="str", required=False),
+ alert_threshold_pct=dict(type="int", default=75, required=False),
+ reserve_capacity_full_policy=dict(type="str", default="purge", choices=["purge", "reject"], required=False),
+ rollback_priority=dict(type="str", default="medium", choices=["highest", "high", "medium", "low", "lowest"], required=False),
+ rollback_backup=dict(type="bool", default=True, required=False),
+ pit_name=dict(type="str", required=False),
+ pit_description=dict(type="str", required=False),
+ pit_timestamp=dict(type="str", required=False),
+ view_name=dict(type="str", required=False),
+ view_host=dict(type="str", default=None, required=False),
+ view_writable=dict(type="bool", default=True, required=False),
+ view_validate=dict(type="bool", default=False, required=False))
+
+ super(NetAppESeriesSnapshot, self).__init__(ansible_options=ansible_options,
+ web_services_version="05.00.0000.0000",
+ supports_check_mode=True)
+ args = self.module.params
+ self.state = args["state"]
+ self.type = args["type"]
+ self.group_name = args["group_name"]
+ self.maximum_snapshots = args["maximum_snapshots"]
+ self.reserve_capacity_pct = args["reserve_capacity_pct"]
+ self.preferred_reserve_storage_pool = args["preferred_reserve_storage_pool"]
+ self.alert_threshold_pct = args["alert_threshold_pct"]
+ self.reserve_capacity_full_policy = "purgepit" if args["reserve_capacity_full_policy"] == "purge" else "failbasewrites"
+ self.rollback_priority = args["rollback_priority"]
+ self.rollback_backup = args["rollback_backup"]
+ self.pit_name = args["pit_name"]
+ self.pit_description = args["pit_description"]
+ self.view_name = args["view_name"]
+ self.view_host = args["view_host"]
+ self.view_writable = args["view_writable"]
+ self.view_validate = args["view_validate"]
+
+ # Complete volume definitions.
+ self.volumes = {}
+ if args["volumes"]:
+ for volume_info in args["volumes"]:
+ reserve_capacity_pct = volume_info["reserve_capacity_pct"] if "reserve_capacity_pct" in volume_info else self.reserve_capacity_pct
+ snapshot_volume_writable = volume_info["snapshot_volume_writable"] if "snapshot_volume_writable" in volume_info else self.view_writable
+ snapshot_volume_validate = volume_info["snapshot_volume_validate"] if "snapshot_volume_validate" in volume_info else self.view_validate
+ snapshot_volume_host = volume_info["snapshot_volume_host"] if "snapshot_volume_host" in volume_info else self.view_host
+ snapshot_volume_lun = volume_info["snapshot_volume_lun"] if "snapshot_volume_lun" in volume_info else None
+ if "preferred_reserve_storage_pool" in volume_info and volume_info["preferred_reserve_storage_pool"]:
+ preferred_reserve_storage_pool = volume_info["preferred_reserve_storage_pool"]
+ else:
+ preferred_reserve_storage_pool = self.preferred_reserve_storage_pool
+
+ self.volumes.update({volume_info["volume"]: {"reserve_capacity_pct": reserve_capacity_pct,
+ "preferred_reserve_storage_pool": preferred_reserve_storage_pool,
+ "snapshot_volume_writable": snapshot_volume_writable,
+ "snapshot_volume_validate": snapshot_volume_validate,
+ "snapshot_volume_host": snapshot_volume_host,
+ "snapshot_volume_lun": snapshot_volume_lun}})
+
+        # Check and convert pit_timestamp to a datetime object.
+ self.pit_timestamp = None
+ self.pit_timestamp_tokens = 0
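+        # pit_timestamp_tokens records how many timestamp fields were supplied (3 for date only, up to 6 for a full time).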
+ if args["pit_timestamp"]:
+ if args["pit_timestamp"] in ["newest", "oldest"]:
+ self.pit_timestamp = args["pit_timestamp"]
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I:%M:%S %p")
+ self.pit_timestamp_tokens = 6
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I:%M %p")
+ self.pit_timestamp_tokens = 5
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I %p")
+ self.pit_timestamp_tokens = 4
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H:%M:%S")
+ self.pit_timestamp_tokens = 6
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H:%M")
+ self.pit_timestamp_tokens = 5
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H")
+ self.pit_timestamp_tokens = 4
+ elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2}", args["pit_timestamp"]):
+ self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d")
+ self.pit_timestamp_tokens = 3
+ else:
+ self.module.fail_json(msg="Invalid argument! pit_timestamp must be in the form YYYY-MM-DD HH:MM:SS (AM|PM) (time portion is optional)."
+ " Array [%s]." % self.ssid)
+
+ # Check for required arguments
+ if self.state == "present":
+ if self.type == "group":
+ if not self.volumes:
+ self.module.fail_json(msg="Missing argument! Volumes must be defined to create a snapshot consistency group."
+ " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+ elif self.type == "pit":
+ if self.pit_timestamp and self.pit_name:
+ self.module.fail_json(msg="Invalid arguments! Either define pit_name with or without pit_description or pit_timestamp."
+ " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+
+ elif self.type == "view":
+ if not self.view_name:
+ self.module.fail_json(msg="Missing argument! view_name must be defined to create a snapshot consistency group view."
+ " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+ if not (self.pit_name or self.pit_timestamp):
+ self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time"
+ " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+ elif self.state == "rollback":
+ if not (self.pit_name or self.pit_timestamp):
+ self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time"
+ " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+ else:
+ if self.type == "pit":
+ if self.pit_name and self.pit_timestamp:
+ self.module.fail_json(msg="Invalid arguments! Either define pit_name or pit_timestamp."
+ " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+ if not (self.pit_name or self.pit_timestamp):
+ self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time"
+ " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+ elif self.type == "view":
+ if not self.view_name:
+ self.module.fail_json(msg="Missing argument! view_name must be defined to create a snapshot consistency group view."
+ " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if not self.is_embedded():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ self.module.fail_json(msg="Snapshot is not a valid operation for SANtricity Web Services Proxy! ssid cannot be '0' or 'proxy'."
+ " Array [%s]" % self.ssid)
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
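+        # Per-run cache of storage system objects so repeated lookups do not re-query the REST API.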
+ self.cache = {"get_consistency_group": {},
+ "get_all_storage_pools_by_id": {},
+ "get_all_storage_pools_by_name": {},
+ "get_all_volumes_by_id": {},
+ "get_all_volumes_by_name": {},
+ "get_all_hosts_and_hostgroups_by_name": {},
+ "get_all_hosts_and_hostgroups_by_id": {},
+ "get_mapping_by_id": {},
+ "get_mapping_by_name": {},
+ "get_all_concat_volumes_by_id": {},
+ "get_pit_images_by_timestamp": {},
+ "get_pit_images_by_name": {},
+ "get_pit_images_metadata": {},
+ "get_unused_pit_key_values": [],
+ "get_pit_info": None,
+ "get_consistency_group_view": {},
+ "view_changes_required": []}
+
+ def get_all_storage_pools_by_id(self):
+ """Retrieve and return all storage pools/volume groups."""
+ if not self.cache["get_all_storage_pools_by_id"]:
+ try:
+ rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
+
+ for storage_pool in storage_pools:
+ self.cache["get_all_storage_pools_by_id"].update({storage_pool["id"]: storage_pool})
+ self.cache["get_all_storage_pools_by_name"].update({storage_pool["name"]: storage_pool})
+ except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve storage pools! Error [%s]. Array [%s]." % (error, self.ssid))
+
+ return self.cache["get_all_storage_pools_by_id"]
+
+ def get_all_storage_pools_by_name(self):
+ """Retrieve and return all storage pools/volume groups."""
+ if not self.cache["get_all_storage_pools_by_name"]:
+ self.get_all_storage_pools_by_id()
+
+ return self.cache["get_all_storage_pools_by_name"]
+
+ def get_all_volumes_by_id(self):
+ """Retrieve and return a dictionary of all thick and thin volumes keyed by id."""
+ if not self.cache["get_all_volumes_by_id"]:
+ try:
+ rc, thick_volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+ rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+
+ for volume in thick_volumes + thin_volumes:
+ self.cache["get_all_volumes_by_id"].update({volume["id"]: volume})
+ self.cache["get_all_volumes_by_name"].update({volume["name"]: volume})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve volumes! Error [%s]. Array [%s]." % (error, self.ssid))
+
+ return self.cache["get_all_volumes_by_id"]
+
+ def get_all_volumes_by_name(self):
+ """Retrieve and return a dictionary of all thick and thin volumes keyed by name."""
+ if not self.cache["get_all_volumes_by_name"]:
+ self.get_all_volumes_by_id()
+
+ return self.cache["get_all_volumes_by_name"]
+
+ def get_all_hosts_and_hostgroups_by_id(self):
+        """Retrieve and return a dictionary of all hosts and host groups keyed by id."""
+ if not self.cache["get_all_hosts_and_hostgroups_by_id"]:
+ try:
+ rc, hostgroups = self.request("storage-systems/%s/host-groups" % self.ssid)
+ # hostgroup_by_id = {hostgroup["id"]: hostgroup for hostgroup in hostgroups}
+ hostgroup_by_id = dict((hostgroup["id"], hostgroup) for hostgroup in hostgroups)
+
+ rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+ for host in hosts:
+ if host["clusterRef"] != "0000000000000000000000000000000000000000":
+ hostgroup_name = hostgroup_by_id[host["clusterRef"]]["name"]
+
+ if host["clusterRef"] not in self.cache["get_all_hosts_and_hostgroups_by_id"].keys():
+ hostgroup_by_id[host["clusterRef"]].update({"hostgroup": True, "host_ids": [host["id"]]})
+ self.cache["get_all_hosts_and_hostgroups_by_id"].update({host["clusterRef"]: hostgroup_by_id[host["clusterRef"]]})
+ self.cache["get_all_hosts_and_hostgroups_by_name"].update({hostgroup_name: hostgroup_by_id[host["clusterRef"]]})
+ else:
+ self.cache["get_all_hosts_and_hostgroups_by_id"][host["clusterRef"]]["host_ids"].append(host["id"])
+ self.cache["get_all_hosts_and_hostgroups_by_name"][hostgroup_name]["host_ids"].append(host["id"])
+
+                    host.update({"hostgroup": False})
+                    self.cache["get_all_hosts_and_hostgroups_by_id"].update({host["id"]: host})
+                    self.cache["get_all_hosts_and_hostgroups_by_name"].update({host["name"]: host})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve all host and host group objects! Error [%s]. Array [%s]." % (error, self.ssid))
+
+ return self.cache["get_all_hosts_and_hostgroups_by_id"]
+
+ def get_all_hosts_and_hostgroups_by_name(self):
+        """Retrieve and return a dictionary of all hosts and host groups keyed by name."""
+ if not self.cache["get_all_hosts_and_hostgroups_by_name"]:
+ self.get_all_hosts_and_hostgroups_by_id()
+
+ return self.cache["get_all_hosts_and_hostgroups_by_name"]
+
+ def get_mapping_by_id(self):
+        """Retrieve and return a dictionary of existing LUN mappings keyed by host or host group id."""
+ if not self.cache["get_mapping_by_id"]:
+ existing_hosts_and_hostgroups_by_id = self.get_all_hosts_and_hostgroups_by_id()
+ existing_hosts_and_hostgroups_by_name = self.get_all_hosts_and_hostgroups_by_name()
+ try:
+ rc, mappings = self.request("storage-systems/%s/volume-mappings" % self.ssid)
+
+ for mapping in mappings:
+ host_ids = [mapping["mapRef"]]
+ map_entry = {mapping["lun"]: mapping["volumeRef"]}
+
+ if mapping["type"] == "cluster":
+ host_ids = existing_hosts_and_hostgroups_by_id[mapping["mapRef"]]["host_ids"]
+ if mapping["mapRef"] in self.cache["get_mapping_by_id"].keys():
+ self.cache["get_mapping_by_id"][mapping["mapRef"]].update(map_entry)
+ self.cache["get_mapping_by_name"][mapping["mapRef"]].update(map_entry)
+ else:
+ self.cache["get_mapping_by_id"].update({mapping["mapRef"]: map_entry})
+ self.cache["get_mapping_by_name"].update({mapping["mapRef"]: map_entry})
+
+ for host_id in host_ids:
+ if host_id in self.cache["get_mapping_by_id"].keys():
+ self.cache["get_mapping_by_id"][mapping["mapRef"]].update(map_entry)
+ self.cache["get_mapping_by_name"][mapping["mapRef"]].update(map_entry)
+ else:
+ self.cache["get_mapping_by_id"].update({host_id: map_entry})
+ self.cache["get_mapping_by_name"].update({host_id: map_entry})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve all volume map definitions! Error [%s]. Array [%s]." % (error, self.ssid))
+
+ return self.cache["get_mapping_by_id"]
+
+ def get_mapping_by_name(self):
+        """Retrieve and return a dictionary of existing LUN mappings."""
+ if not self.cache["get_mapping_by_name"]:
+ self.get_mapping_by_id()
+
+ return self.cache["get_mapping_by_name"]
+
+ def get_all_concat_volumes_by_id(self):
+        """Retrieve and return a dictionary of all concatenated (reserve capacity) volumes keyed by id."""
+ if not self.cache["get_all_concat_volumes_by_id"]:
+ try:
+ rc, concat_volumes = self.request("storage-systems/%s/repositories/concat" % self.ssid)
+
+ for volume in concat_volumes:
+ self.cache["get_all_concat_volumes_by_id"].update({volume["id"]: volume})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve reserve capacity volumes! Error [%s]. Array [%s]." % (error, self.ssid))
+
+ return self.cache["get_all_concat_volumes_by_id"]
+
+ def get_consistency_group(self):
+ """Retrieve consistency groups and return information on the expected group."""
+ existing_volumes = self.get_all_volumes_by_id()
+
+ if not self.cache["get_consistency_group"]:
+ try:
+ rc, consistency_groups = self.request("storage-systems/%s/consistency-groups" % self.ssid)
+
+ for consistency_group in consistency_groups:
+ if consistency_group["label"] == self.group_name:
+ rc, member_volumes = self.request("storage-systems/%s/consistency-groups/%s/member-volumes" % (self.ssid, consistency_group["id"]))
+
+ self.cache["get_consistency_group"].update({"consistency_group_id": consistency_group["cgRef"],
+ "alert_threshold_pct": consistency_group["fullWarnThreshold"],
+ "maximum_snapshots": consistency_group["autoDeleteLimit"],
+ "rollback_priority": consistency_group["rollbackPriority"],
+ "reserve_capacity_full_policy": consistency_group["repFullPolicy"],
+ "sequence_numbers": consistency_group["uniqueSequenceNumber"],
+ "base_volumes": []})
+
+ for member_volume in member_volumes:
+ base_volume = existing_volumes[member_volume["volumeId"]]
+ base_volume_size_b = int(base_volume["totalSizeInBytes"])
+ total_reserve_capacity_b = int(member_volume["totalRepositoryCapacity"])
+ reserve_capacity_pct = int(round(float(total_reserve_capacity_b) / float(base_volume_size_b) * 100))
+
+ rc, concat = self.request("storage-systems/%s/repositories/concat/%s" % (self.ssid, member_volume["repositoryVolume"]))
+
+ self.cache["get_consistency_group"]["base_volumes"].append({"name": base_volume["name"],
+ "id": base_volume["id"],
+ "base_volume_size_b": base_volume_size_b,
+ "total_reserve_capacity_b": total_reserve_capacity_b,
+ "reserve_capacity_pct": reserve_capacity_pct,
+ "repository_volume_info": concat})
+ break
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve snapshot consistency groups! Error [%s]. Array [%s]." % (error, self.ssid))
+
+ return self.cache["get_consistency_group"]
+
+ def get_candidate(self, volume_name, volume_info):
+ """Return candidate for volume."""
+ existing_storage_pools_by_id = self.get_all_storage_pools_by_id()
+ existing_storage_pools_by_name = self.get_all_storage_pools_by_name()
+ existing_volumes_by_name = self.get_all_volumes_by_name()
+
+ if volume_name in existing_volumes_by_name:
+ base_volume_storage_pool_id = existing_volumes_by_name[volume_name]["volumeGroupRef"]
+ base_volume_storage_pool_name = existing_storage_pools_by_id[base_volume_storage_pool_id]["name"]
+
+ preferred_reserve_storage_pool = base_volume_storage_pool_id
+ if volume_info["preferred_reserve_storage_pool"]:
+ if volume_info["preferred_reserve_storage_pool"] in existing_storage_pools_by_name:
+ preferred_reserve_storage_pool = existing_storage_pools_by_name[volume_info["preferred_reserve_storage_pool"]]["id"]
+ else:
+ self.module.fail_json(msg="Preferred storage pool or volume group does not exist! Storage pool [%s]. Group [%s]."
+ " Array [%s]." % (volume_info["preferred_reserve_storage_pool"], self.group_name, self.ssid))
+
+ volume_info.update({"name": volume_name,
+ "id": existing_volumes_by_name[volume_name]["id"],
+ "storage_pool_name": base_volume_storage_pool_name,
+ "storage_pool_id": base_volume_storage_pool_id,
+ "preferred_reserve_storage_pool": preferred_reserve_storage_pool,
+ "candidate": None})
+
+ else:
+ self.module.fail_json(msg="Volume does not exist! Volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))
+
+ candidate_request = {"candidateRequest": {"baseVolumeRef": volume_info["id"],
+ "percentCapacity": volume_info["reserve_capacity_pct"],
+ "concatVolumeType": "snapshot"}}
+ try:
+ rc, candidates = self.request("storage-systems/%s/repositories/concat/single" % self.ssid, method="POST", data=candidate_request)
+ for candidate in candidates:
+ if candidate["volumeGroupId"] == volume_info["preferred_reserve_storage_pool"]:
+ volume_info["candidate"] = candidate
+ break
+ else:
+ self.module.fail_json(msg="Failed to retrieve capacity volume candidate in preferred storage pool or volume group!"
+ " Volume [%s]. Group [%s]. Array [%s]." % (volume_info["name"], self.group_name, self.ssid))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to get reserve capacity candidates!"
+ " Volumes %s. Group [%s]. Array [%s]. Error [%s]" % (volume_info["name"], self.group_name, self.ssid, error))
+
+ return volume_info
+
+ def get_pit_images_metadata(self):
+ """Retrieve and return consistency group snapshot images' metadata keyed on timestamps."""
+ if not self.cache["get_pit_images_metadata"]:
+ try:
+ rc, key_values = self.request(self.url_path_prefix + "key-values")
+
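+                # Snapshot image metadata is kept in the array's key-value store under keys of the form
+                # "ansible|<group_name>|<pit_name>"; the values are pipe-delimited, beginning with the image timestamp.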
+ for entry in key_values:
+ if re.search("ansible\\|%s\\|" % self.group_name, entry["key"]):
+ name = entry["key"].replace("ansible|%s|" % self.group_name, "")
+ values = entry["value"].split("|")
+ if len(values) == 3:
+ timestamp, image_id, description = values
+ self.cache["get_pit_images_metadata"].update({timestamp: {"name": name, "description": description}})
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve consistency group snapshot images metadata! Array [%s]. Error [%s]." % (self.ssid, error))
+
+ return self.cache["get_pit_images_metadata"]
+
+ def get_pit_images_by_timestamp(self):
+ """Retrieve and return snapshot images."""
+ if not self.cache["get_pit_images_by_timestamp"]:
+ group_id = self.get_consistency_group()["consistency_group_id"]
+ images_metadata = self.get_pit_images_metadata()
+ existing_volumes_by_id = self.get_all_volumes_by_id()
+
+ try:
+ rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots" % (self.ssid, group_id))
+ for image_info in images:
+
+ metadata = {"id": "", "name": "", "description": ""}
+ if image_info["pitTimestamp"] in images_metadata.keys():
+ metadata = images_metadata[image_info["pitTimestamp"]]
+
+ timestamp = datetime.fromtimestamp(int(image_info["pitTimestamp"]))
+ info = {"id": image_info["id"],
+ "name": metadata["name"],
+ "timestamp": timestamp,
+ "description": metadata["description"],
+ "sequence_number": image_info["pitSequenceNumber"],
+ "base_volume_id": image_info["baseVol"],
+ "base_volume_name": existing_volumes_by_id[image_info["baseVol"]]["name"],
+ "image_info": image_info}
+
+ if timestamp not in self.cache["get_pit_images_by_timestamp"].keys():
+ self.cache["get_pit_images_by_timestamp"].update({timestamp: {"sequence_number": image_info["pitSequenceNumber"], "images": [info]}})
+ if metadata["name"]:
+ self.cache["get_pit_images_by_name"].update({metadata["name"]: {"sequence_number": image_info["pitSequenceNumber"],
+ "images": [info]}})
+ else:
+ self.cache["get_pit_images_by_timestamp"][timestamp]["images"].append(info)
+ if metadata["name"]:
+ self.cache["get_pit_images_by_name"][metadata["name"]]["images"].append(info)
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve consistency group snapshot images!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ return self.cache["get_pit_images_by_timestamp"]
+
+ def get_pit_images_by_name(self):
+ """Retrieve and return snapshot images."""
+ if not self.cache["get_pit_images_by_name"]:
+ self.get_pit_images_by_timestamp()
+
+ return self.cache["get_pit_images_by_name"]
+
+ def get_unused_pit_key(self):
+ """Determine all embedded pit key-values that do not match existing snapshot images."""
+ if not self.cache["get_unused_pit_key_values"]:
+ try:
+ rc, images = self.request("storage-systems/%s/snapshot-images" % self.ssid)
+ rc, key_values = self.request("key-values")
+
+ for key_value in key_values:
+ key = key_value["key"]
+ value = key_value["value"]
+ if re.match("ansible\\|.*\\|.*", value):
+ for image in images:
+ if str(image["pitTimestamp"]) == value.split("|")[0]:
+ break
+ else:
+ self.cache["get_unused_pit_key_values"].append(key)
+ except Exception as error:
+                self.module.warn("Failed to retrieve all snapshots to determine the key-value pairs that do not match a point-in-time snapshot image!"
+ " Array [%s]. Error [%s]." % (self.ssid, error))
+
+ return self.cache["get_unused_pit_key_values"]
+
+ def get_pit_info(self):
+        """Determine consistency group's snapshot images based on the provided arguments (pit_name or timestamp)."""
+
+ def _check_timestamp(timestamp):
+ """Check whether timestamp matches I(pit_timestamp)"""
+ return (self.pit_timestamp.year == timestamp.year and
+ self.pit_timestamp.month == timestamp.month and
+ self.pit_timestamp.day == timestamp.day and
+ (self.pit_timestamp_tokens < 4 or self.pit_timestamp.hour == timestamp.hour) and
+ (self.pit_timestamp_tokens < 5 or self.pit_timestamp.minute == timestamp.minute) and
+ (self.pit_timestamp_tokens < 6 or self.pit_timestamp.second == timestamp.second))
+
+ if self.cache["get_pit_info"] is None:
+ group = self.get_consistency_group()
+ pit_images_by_timestamp = self.get_pit_images_by_timestamp()
+ pit_images_by_name = self.get_pit_images_by_name()
+
+ if self.pit_name:
+ if self.pit_name in pit_images_by_name.keys():
+ self.cache["get_pit_info"] = pit_images_by_name[self.pit_name]
+
+ if self.pit_timestamp:
+ for image in self.cache["get_pit_info"]["images"]:
+ if not _check_timestamp(image["timestamp"]):
+ self.module.fail_json(msg="Snapshot image does not exist that matches both name and supplied timestamp!"
+ " Group [%s]. Image [%s]. Array [%s]." % (self.group_name, image, self.ssid))
+ elif self.pit_timestamp and pit_images_by_timestamp:
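+                # The literal values "newest" and "oldest" select images by the consistency group's recorded sequence
+                # numbers; any other value is matched against each image's timestamp down to the supplied precision.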
+ sequence_number = None
+ if self.pit_timestamp == "newest":
+ sequence_number = group["sequence_numbers"][-1]
+
+ for image_timestamp in pit_images_by_timestamp.keys():
+ if int(pit_images_by_timestamp[image_timestamp]["sequence_number"]) == int(sequence_number):
+ self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
+ break
+ elif self.pit_timestamp == "oldest":
+ sequence_number = group["sequence_numbers"][0]
+ for image_timestamp in pit_images_by_timestamp.keys():
+ if int(pit_images_by_timestamp[image_timestamp]["sequence_number"]) == int(sequence_number):
+ self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
+ break
+ else:
+ for image_timestamp in pit_images_by_timestamp.keys():
+ if _check_timestamp(image_timestamp):
+ if sequence_number and sequence_number != pit_images_by_timestamp[image_timestamp]["sequence_number"]:
+ self.module.fail_json(msg="Multiple snapshot images match the provided timestamp and do not have the same sequence number!"
+ " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
+
+ sequence_number = pit_images_by_timestamp[image_timestamp]["sequence_number"]
+ self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
+
+ if self.state != "absent" and self.type != "pit" and self.cache["get_pit_info"] is None:
+ self.module.fail_json(msg="Snapshot consistency group point-in-time image does not exist! Name [%s]. Timestamp [%s]. Group [%s]."
+ " Array [%s]." % (self.pit_name, self.pit_timestamp, self.group_name, self.ssid))
+
+ return self.cache["get_pit_info"]
+
+ def create_changes_required(self):
+ """Determine the required state changes for creating a new consistency group."""
+ changes = {"create_group": {"name": self.group_name,
+ "alert_threshold_pct": self.alert_threshold_pct,
+ "maximum_snapshots": self.maximum_snapshots,
+ "reserve_capacity_full_policy": self.reserve_capacity_full_policy,
+ "rollback_priority": self.rollback_priority},
+ "add_volumes": self.volumes}
+
+ return changes
+
+ def update_changes_required(self):
+ """Determine the required state changes for updating an existing consistency group."""
+ group = self.get_consistency_group()
+ changes = {"update_group": {},
+ "add_volumes": [],
+ "remove_volumes": [],
+ "expand_reserve_capacity": [],
+ "trim_reserve_capacity": []}
+
+ # Check if consistency group settings need to be updated.
+ if group["alert_threshold_pct"] != self.alert_threshold_pct:
+ changes["update_group"].update({"alert_threshold_pct": self.alert_threshold_pct})
+ if group["maximum_snapshots"] != self.maximum_snapshots:
+ changes["update_group"].update({"maximum_snapshots": self.maximum_snapshots})
+ if group["rollback_priority"] != self.rollback_priority:
+ changes["update_group"].update({"rollback_priority": self.rollback_priority})
+ if group["reserve_capacity_full_policy"] != self.reserve_capacity_full_policy:
+ changes["update_group"].update({"reserve_capacity_full_policy": self.reserve_capacity_full_policy})
+
+ # Check if base volumes need to be added or removed from consistency group.
+ # remaining_base_volumes = {base_volumes["name"]: base_volumes for base_volumes in group["base_volumes"]} # NOT python2.6 compatible
+ remaining_base_volumes = dict((base_volumes["name"], base_volumes) for base_volumes in group["base_volumes"])
+ add_volumes = {}
+ expand_volumes = {}
+
+ for volume_name, volume_info in self.volumes.items():
+ reserve_capacity_pct = volume_info["reserve_capacity_pct"]
+ if volume_name in remaining_base_volumes:
+
+ # Check if reserve capacity needs to be expanded or trimmed.
+ base_volume_reserve_capacity_pct = remaining_base_volumes[volume_name]["reserve_capacity_pct"]
+ if reserve_capacity_pct > base_volume_reserve_capacity_pct:
+ expand_reserve_capacity_pct = reserve_capacity_pct - base_volume_reserve_capacity_pct
+ expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
+ "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
+ "reserve_volume_id": remaining_base_volumes[volume_name]["repository_volume_info"]["id"]}})
+
+ elif reserve_capacity_pct < base_volume_reserve_capacity_pct:
+ existing_volumes_by_id = self.get_all_volumes_by_id()
+ existing_volumes_by_name = self.get_all_volumes_by_name()
+ existing_concat_volumes_by_id = self.get_all_concat_volumes_by_id()
+ trim_pct = base_volume_reserve_capacity_pct - reserve_capacity_pct
+
+                    # Check whether any snapshot images exist for the base volume; if so, fail since a trim operation
+                    # cannot be done while snapshot images exist.
+                    for timestamp, image_entry in self.get_pit_images_by_timestamp().items():
+                        for image in image_entry["images"]:
+                            if existing_volumes_by_id[image["base_volume_id"]]["name"] == volume_name:
+                                self.module.fail_json(msg="Reserve capacity cannot be trimmed when snapshot images exist for base volume!"
+                                                          " Base volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))
+
+                    # Collect the member volumes that must be trimmed to meet or exceed the required trim percentage.
+ concat_volume_id = remaining_base_volumes[volume_name]["repository_volume_info"]["id"]
+ concat_volume_info = existing_concat_volumes_by_id[concat_volume_id]
+ base_volume_info = existing_volumes_by_name[volume_name]
+ base_volume_size_bytes = int(base_volume_info["totalSizeInBytes"])
+
+ total_member_volume_size_bytes = 0
+ member_volumes_to_trim = []
+ for trim_count, member_volume_id in enumerate(reversed(concat_volume_info["memberRefs"][1:])):
+ member_volume_info = existing_volumes_by_id[member_volume_id]
+ member_volumes_to_trim.append(member_volume_info)
+
+ total_member_volume_size_bytes += int(member_volume_info["totalSizeInBytes"])
+ total_trimmed_size_pct = round(total_member_volume_size_bytes / base_volume_size_bytes * 100)
+
+ if total_trimmed_size_pct >= trim_pct:
+ changes["trim_reserve_capacity"].append({"concat_volume_id": concat_volume_id, "trim_count": trim_count + 1})
+
+ # Expand after trim if needed.
+ if total_trimmed_size_pct > trim_pct:
+ expand_reserve_capacity_pct = total_trimmed_size_pct - trim_pct
+ expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
+ "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
+ "reserve_volume_id": remaining_base_volumes[volume_name]["repository_volume_info"]["id"]}})
+ break
+ else:
+ initial_reserve_volume_info = existing_volumes_by_id[concat_volume_info["memberRefs"][0]]
+ minimum_capacity_pct = round(int(initial_reserve_volume_info["totalSizeInBytes"]) / base_volume_size_bytes * 100)
+ self.module.fail_json(msg="Cannot delete initial reserve capacity volume! Minimum reserve capacity percent [%s]. Base volume [%s]. "
+ "Group [%s]. Array [%s]." % (minimum_capacity_pct, volume_name, self.group_name, self.ssid))
+
+ remaining_base_volumes.pop(volume_name)
+ else:
+ add_volumes.update({volume_name: {"reserve_capacity_pct": reserve_capacity_pct,
+ "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"]}})
+
+ changes["add_volumes"] = add_volumes
+ changes["expand_reserve_capacity"] = expand_volumes
+ changes["remove_volumes"] = remaining_base_volumes
+ return changes
+
+ def get_consistency_group_view(self):
+ """Determine and return consistency group view."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+
+ if not self.cache["get_consistency_group_view"]:
+ try:
+ rc, views = self.request("storage-systems/%s/consistency-groups/%s/views" % (self.ssid, group_id))
+
+ # Check for existing view (collection of snapshot volumes for a consistency group) within consistency group.
+ for view in views:
+ if view["name"] == self.view_name:
+ self.cache["get_consistency_group_view"] = view
+ self.cache["get_consistency_group_view"].update({"snapshot_volumes": []})
+
+ # Determine snapshot volumes associated with view.
+ try:
+ rc, snapshot_volumes = self.request("storage-systems/%s/snapshot-volumes" % self.ssid)
+
+ for snapshot_volume in snapshot_volumes:
+ if (snapshot_volume["membership"] and
+ snapshot_volume["membership"]["viewType"] == "member" and
+ snapshot_volume["membership"]["cgViewRef"] == view["cgViewRef"]):
+ self.cache["get_consistency_group_view"]["snapshot_volumes"].append(snapshot_volume)
+ except Exception as error:
+                                self.module.fail_json(msg="Failed to retrieve host mapping information!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve consistency group's views!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ return self.cache["get_consistency_group_view"]
+
+ def create_view_changes_required(self):
+ """Determine whether snapshot consistency group point-in-time view needs to be created."""
+ changes = {}
+ snapshot_images_info = self.get_pit_info()
+ changes.update({"name": self.view_name,
+ "sequence_number": snapshot_images_info["sequence_number"],
+ "images": snapshot_images_info["images"],
+ "volumes": self.volumes})
+
+ return changes
+
+ def update_view_changes_required(self):
+ """Determine the changes required for snapshot consistency group point-in-time view."""
+ changes = {"expand_reserve_capacity": [],
+ "trim_reserve_capacity": [],
+ "map_snapshot_volumes_mapping": [],
+ "unmap_snapshot_volumes_mapping": [],
+ "move_snapshot_volumes_mapping": [],
+ "update_snapshot_volumes_writable": []}
+ view = self.get_consistency_group_view()
+ host_objects_by_name = self.get_all_hosts_and_hostgroups_by_name()
+ host_objects_by_id = self.get_all_hosts_and_hostgroups_by_id()
+ existing_volumes_by_id = self.get_all_volumes_by_id()
+ if view:
+ if len(view["snapshot_volumes"]) != len(self.volumes):
+ self.module.fail_json(msg="Cannot add or remove snapshot volumes once view is created! Group [%s]. Array [%s]." % (self.group_name, self.ssid))
+
+ expand_volumes = {}
+ writable_volumes = {}
+ for snapshot_volume in view["snapshot_volumes"]:
+ for volume_name, volume_info in self.volumes.items():
+ if existing_volumes_by_id[snapshot_volume["baseVol"]]["name"] == volume_name:
+
+                        # Check whether the snapshot volume needs to be mapped to a host or host group.
+ if volume_info["snapshot_volume_host"] and not snapshot_volume["listOfMappings"]:
+ changes["map_snapshot_volumes_mapping"].append({"mappableObjectId": snapshot_volume["id"],
+ "lun": volume_info["snapshot_volume_lun"],
+ "targetId": host_objects_by_name[volume_info["snapshot_volume_host"]]["id"]})
+
+                        # Check whether the snapshot volume needs to be unmapped from its host or host group.
+ elif not volume_info["snapshot_volume_host"] and snapshot_volume["listOfMappings"]:
+ changes["unmap_snapshot_volumes_mapping"].append({"snapshot_volume_name": snapshot_volume["name"],
+ "lun_mapping_reference": snapshot_volume["listOfMappings"][0]["lunMappingRef"]})
+
+                        # Check whether the host mapping needs to be moved.
+ elif (snapshot_volume["listOfMappings"] and
+ ((volume_info["snapshot_volume_host"] != host_objects_by_id[snapshot_volume["listOfMappings"][0]["mapRef"]]["name"]) or
+ (volume_info["snapshot_volume_lun"] != snapshot_volume["listOfMappings"][0]["lun"]))):
+ changes["move_snapshot_volumes_mapping"].append({"lunMappingRef": snapshot_volume["listOfMappings"][0]["lunMappingRef"],
+ "lun": volume_info["snapshot_volume_lun"],
+ "mapRef": host_objects_by_name[volume_info["snapshot_volume_host"]]["id"]})
+ # Check writable mode
+ if volume_info["snapshot_volume_writable"] != (snapshot_volume["accessMode"] == "readWrite"):
+ volume_info.update({"snapshot_volume_id": snapshot_volume["id"]})
+ writable_volumes.update({volume_name: volume_info})
+
+ # Check reserve capacity.
+ if volume_info["snapshot_volume_writable"] and snapshot_volume["accessMode"] == "readWrite":
+ current_reserve_capacity_pct = int(round(float(snapshot_volume["repositoryCapacity"]) /
+ float(snapshot_volume["baseVolumeCapacity"]) * 100))
+ if volume_info["reserve_capacity_pct"] > current_reserve_capacity_pct:
+ expand_reserve_capacity_pct = volume_info["reserve_capacity_pct"] - current_reserve_capacity_pct
+ expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
+ "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
+ "reserve_volume_id": snapshot_volume["repositoryVolume"]}})
+
+ elif volume_info["reserve_capacity_pct"] < current_reserve_capacity_pct:
+ existing_volumes_by_id = self.get_all_volumes_by_id()
+ existing_volumes_by_name = self.get_all_volumes_by_name()
+ existing_concat_volumes_by_id = self.get_all_concat_volumes_by_id()
+ trim_pct = current_reserve_capacity_pct - volume_info["reserve_capacity_pct"]
+
+                            # Collect the member volumes that must be trimmed to meet or exceed the required trim percentage.
+ concat_volume_id = snapshot_volume["repositoryVolume"]
+ concat_volume_info = existing_concat_volumes_by_id[concat_volume_id]
+ base_volume_info = existing_volumes_by_name[volume_name]
+ base_volume_size_bytes = int(base_volume_info["totalSizeInBytes"])
+
+ total_member_volume_size_bytes = 0
+ member_volumes_to_trim = []
+ for trim_count, member_volume_id in enumerate(reversed(concat_volume_info["memberRefs"][1:])):
+ member_volume_info = existing_volumes_by_id[member_volume_id]
+ member_volumes_to_trim.append(member_volume_info)
+
+ total_member_volume_size_bytes += int(member_volume_info["totalSizeInBytes"])
+ total_trimmed_size_pct = round(total_member_volume_size_bytes / base_volume_size_bytes * 100)
+
+ if total_trimmed_size_pct >= trim_pct:
+ changes["trim_reserve_capacity"].append({"concat_volume_id": concat_volume_id, "trim_count": trim_count + 1})
+
+ # Expand after trim if needed.
+ if total_trimmed_size_pct > trim_pct:
+ expand_reserve_capacity_pct = total_trimmed_size_pct - trim_pct
+ expand_volumes.update({
+ volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
+ "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
+ "reserve_volume_id": snapshot_volume["repositoryVolume"]}})
+ break
+ else:
+ initial_reserve_volume_info = existing_volumes_by_id[concat_volume_info["memberRefs"][0]]
+ minimum_capacity_pct = round(int(initial_reserve_volume_info["totalSizeInBytes"]) / base_volume_size_bytes * 100)
+ self.module.fail_json(msg="Cannot delete initial reserve capacity volume! Minimum reserve capacity percent [%s]. "
+ "Base volume [%s]. Group [%s]. Array [%s]." % (minimum_capacity_pct, volume_name,
+ self.group_name, self.ssid))
+ changes.update({"expand_reserve_capacity": expand_volumes,
+ "update_snapshot_volumes_writable": writable_volumes})
+ return changes
+
+ def rollback_changes_required(self):
+ """Determine the changes required for snapshot consistency group point-in-time rollback."""
+ return self.get_pit_info()
+
+ def remove_snapshot_consistency_group(self, info):
+        """Remove an existing snapshot consistency group."""
+ try:
+ rc, resp = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, info["consistency_group_id"]), method="DELETE")
+ except Exception as error:
+            self.module.fail_json(msg="Failed to remove snapshot consistency group! Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def create_snapshot_consistency_group(self, group_info):
+ """Create a new snapshot consistency group."""
+ consistency_group_request = {"name": self.group_name,
+ "fullWarnThresholdPercent": group_info["alert_threshold_pct"],
+ "autoDeleteThreshold": group_info["maximum_snapshots"],
+ "repositoryFullPolicy": group_info["reserve_capacity_full_policy"],
+ "rollbackPriority": group_info["rollback_priority"]}
+
+ try:
+ rc, group = self.request("storage-systems/%s/consistency-groups" % self.ssid, method="POST", data=consistency_group_request)
+ self.cache["get_consistency_group"].update({"consistency_group_id": group["cgRef"]})
+ except Exception as error:
+            self.module.fail_json(msg="Failed to create snapshot consistency group! Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def update_snapshot_consistency_group(self, group_info):
+        """Update an existing snapshot consistency group."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+ consistency_group_request = {"name": self.group_name}
+ if "alert_threshold_pct" in group_info.keys():
+ consistency_group_request.update({"fullWarnThresholdPercent": group_info["alert_threshold_pct"]})
+ if "maximum_snapshots" in group_info.keys():
+ consistency_group_request.update({"autoDeleteThreshold": group_info["maximum_snapshots"]})
+ if "reserve_capacity_full_policy" in group_info.keys():
+ consistency_group_request.update({"repositoryFullPolicy": group_info["reserve_capacity_full_policy"]})
+ if "rollback_priority" in group_info.keys():
+ consistency_group_request.update({"rollbackPriority": group_info["rollback_priority"]})
+
+ try:
+ rc, group = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, group_id), method="POST", data=consistency_group_request)
+ return group["cgRef"]
+ except Exception as error:
+            self.module.fail_json(msg="Failed to update snapshot consistency group! Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def add_base_volumes(self, volumes):
+ """Add base volume(s) to the consistency group."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+ member_volume_request = {"volumeToCandidates": {}}
+
+ for volume_name, volume_info in volumes.items():
+ candidate = self.get_candidate(volume_name, volume_info)
+ member_volume_request["volumeToCandidates"].update({volume_info["id"]: candidate["candidate"]["candidate"]})
+
+ try:
+ rc, resp = self.request("storage-systems/%s/consistency-groups/%s/member-volumes/batch" % (self.ssid, group_id),
+ method="POST", data=member_volume_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to add reserve capacity volume! Base volumes %s. Group [%s]. Error [%s]."
+                                      " Array [%s]." % (", ".join(volumes.keys()), self.group_name, error, self.ssid))
+
+ def remove_base_volumes(self, volume_info_list):
+        """Remove base volume(s) from the consistency group."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+
+ for name, info in volume_info_list.items():
+ try:
+ rc, resp = self.request("storage-systems/%s/consistency-groups/%s/member-volumes/%s" % (self.ssid, group_id, info["id"]), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to remove reserve capacity volume! Base volume [%s]. Group [%s]. Error [%s]. "
+ "Array [%s]." % (name, self.group_name, error, self.ssid))
+
+ def expand_reserve_capacities(self, reserve_volumes):
+ """Expand base volume(s) reserve capacity."""
+ for volume_name, volume_info in reserve_volumes.items():
+ candidate = self.get_candidate(volume_name, volume_info)
+ expand_request = {"repositoryRef": volume_info["reserve_volume_id"],
+ "expansionCandidate": candidate["candidate"]["candidate"]}
+ try:
+ rc, resp = self.request("/storage-systems/%s/repositories/concat/%s/expand" % (self.ssid, volume_info["reserve_volume_id"]),
+ method="POST", data=expand_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to expand reserve capacity volume! Group [%s]. Error [%s]. Array [%s]." % (self.group_name, error, self.ssid))
+
+ def trim_reserve_capacities(self, trim_reserve_volume_info_list):
+        """Trim base volume(s) reserve capacity."""
+ for info in trim_reserve_volume_info_list:
+ trim_request = {"concatVol": info["concat_volume_id"],
+ "trimCount": info["trim_count"],
+ "retainRepositoryMembers": False}
+ try:
+ rc, trim = self.request("storage-systems/%s/symbol/trimConcatVolume?verboseErrorResponse=true" % self.ssid, method="POST", data=trim_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to trim reserve capacity. Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def create_pit_images(self):
+ """Generate snapshot image(s) for the base volumes in the consistency group."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+
+ try:
+ rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots" % (self.ssid, group_id), method="POST")
+
+            # The embedded web services instance should store the pit_image metadata; if it were sent to the proxy, it would be written to the proxy instead.
+ if self.pit_name:
+ try:
+ rc, key_values = self.request(self.url_path_prefix + "key-values/ansible|%s|%s" % (self.group_name, self.pit_name), method="POST",
+ data="%s|%s|%s" % (images[0]["pitTimestamp"], self.pit_name, self.pit_description))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create metadata for snapshot images!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create consistency group snapshot images!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def remove_pit_images(self, pit_info):
+ """Remove selected snapshot point-in-time images."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+
+ pit_sequence_number = int(pit_info["sequence_number"])
+ sequence_numbers = set(int(pit_image["sequence_number"]) for timestamp, pit_image in self.get_pit_images_by_timestamp().items()
+ if int(pit_image["sequence_number"]) < pit_sequence_number)
+ sequence_numbers.add(pit_sequence_number)
+
+ for sequence_number in sorted(sequence_numbers):
+
+ try:
+ rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots/%s" % (self.ssid, group_id, sequence_number), method="DELETE")
+ except Exception as error:
+                self.module.fail_json(msg="Failed to remove consistency group snapshot images!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+        # The embedded web services instance should store the pit_image metadata; if it were sent to the proxy, it would be written to the proxy instead.
+ if self.pit_name:
+ try:
+ rc, key_values = self.request(self.url_path_prefix + "key-values/ansible|%s|%s" % (self.group_name, self.pit_name), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete metadata for snapshot images!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def cleanup_old_pit_metadata(self, keys):
+ """Delete unused point-in-time image metadata."""
+ for key in keys:
+ try:
+ rc, images = self.request("key-values/%s" % key, method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to purge unused point-in-time image metadata! Key [%s]. Array [%s]."
+ " Error [%s]." % (key, self.ssid, error))
+
+ def create_view(self, view_info):
+ """Generate consistency group view."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+ view_request = {"name": view_info["name"],
+ "pitSequenceNumber": view_info["sequence_number"],
+ "requests": []}
+
+ for volume_name, volume_info in view_info["volumes"].items():
+ candidate = None
+ if volume_info["snapshot_volume_writable"]:
+ candidate = self.get_candidate(volume_name, volume_info)
+
+ for image in view_info["images"]:
+ if volume_name == image["base_volume_name"]:
+ view_request["requests"].append({"pitId": image["id"],
+ "candidate": candidate["candidate"]["candidate"] if candidate else None,
+ "accessMode": "readWrite" if volume_info["snapshot_volume_writable"] else "readOnly",
+ "scanMedia": volume_info["snapshot_volume_validate"],
+ "validateParity": volume_info["snapshot_volume_validate"]})
+ break
+ else:
+ self.module.fail_json(msg="Base volume does not exist! Volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))
+ try:
+ rc, images = self.request("storage-systems/%s/consistency-groups/%s/views/batch" % (self.ssid, group_id), method="POST", data=view_request)
+
+ # Determine snapshot volume mappings
+ view = self.get_consistency_group_view()
+ existing_volumes_by_id = self.get_all_volumes_by_id()
+ existing_hosts_by_name = self.get_all_hosts_and_hostgroups_by_name()
+ for volume_name, volume_info in self.volumes.items():
+ if volume_info["snapshot_volume_host"]:
+ for snapshot_volume in view["snapshot_volumes"]:
+ if volume_name == existing_volumes_by_id[snapshot_volume["baseVol"]]["name"]:
+ snapshot_volume_map_request = {"mappableObjectId": snapshot_volume["id"],
+ "lun": volume_info["snapshot_volume_lun"],
+ "targetId": existing_hosts_by_name[volume_info["snapshot_volume_host"]]["id"]}
+ try:
+ rc, mapping = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=snapshot_volume_map_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to map snapshot volume! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]."
+ " Error [%s]" % (snapshot_volume["name"], self.view_name, self.group_name, self.ssid, error))
+ break
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create consistency group snapshot volumes!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def map_view(self, map_information_list):
+ """Map consistency group point-in-time snapshot volumes to host or host group."""
+ existing_volumes = self.get_all_volumes_by_id()
+ existing_host_or_hostgroups = self.get_all_hosts_and_hostgroups_by_id()
+ for map_request in map_information_list:
+ try:
+ rc, mapping = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=map_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to map snapshot volume! Snapshot volume [%s]. Target [%s]. Lun [%s]. Group [%s]. Array [%s]."
+ " Error [%s]." % (existing_volumes[map_request["mappableObjectId"]],
+ existing_host_or_hostgroups[map_request["targetId"]],
+ map_request["lun"], self.group_name, self.ssid, error))
+
+ def unmap_view(self, unmap_info_list):
+ """Unmap consistency group point-in-time snapshot volumes from host or host group."""
+ for unmap_info in unmap_info_list:
+ try:
+ rc, unmap = self.request("storage-systems/%s/volume-mappings/%s" % (self.ssid, unmap_info["lun_mapping_reference"]), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to unmap snapshot volume! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]."
+ " Error [%s]." % (unmap_info["snapshot_volume_name"], self.view_name, self.group_name, self.ssid, error))
+
+ def move_view_mapping(self, map_information_list):
+ """Move consistency group point-in-time snapshot volumes to a different host or host group."""
+ existing_volumes = self.get_all_volumes_by_id()
+ existing_host_or_hostgroups = self.get_all_hosts_and_hostgroups_by_id()
+ for map_request in map_information_list:
+ try:
+ rc, mapping = self.request("storage-systems/%s/symbol/moveLUNMapping?verboseErrorResponse=true" % self.ssid, method="POST", data=map_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to move snapshot volume mapping! Snapshot volume [%s]. Target [%s]. Lun [%s]. Group [%s]. Array [%s]."
+ " Error [%s]." % (existing_volumes[map_request["mappableObjectId"]],
+ existing_host_or_hostgroups[map_request["targetId"]],
+ map_request["lun"], self.group_name, self.ssid, error))
+
+ def convert_view_to_writable(self, convert_view_information_list):
+ """Make consistency group point-in-time snapshot volumes writable."""
+ for volume_name, volume_info in convert_view_information_list.items():
+ candidate = self.get_candidate(volume_name, volume_info)
+ convert_request = {"fullThreshold": self.alert_threshold_pct,
+ "repositoryCandidate": candidate["candidate"]["candidate"]}
+ try:
+ rc, convert = self.request("/storage-systems/%s/snapshot-volumes/%s/convertReadOnly" % (self.ssid, volume_info["snapshot_volume_id"]),
+ method="POST", data=convert_request)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to convert snapshot volume to read/write! Snapshot volume [%s]. View [%s] Group [%s]. Array [%s]."
+ " Error [%s]." % (volume_info["snapshot_volume_id"], self.view_name, self.group_name, self.ssid, error))
+
+ def remove_view(self, view_id):
+ """Remove a consistency group view."""
+ group_id = self.get_consistency_group()["consistency_group_id"]
+
+ try:
+ rc, images = self.request("storage-systems/%s/consistency-groups/%s/views/%s" % (self.ssid, group_id, view_id), method="DELETE")
+ except Exception as error:
+            self.module.fail_json(msg="Failed to remove consistency group view!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def rollback(self, rollback_info):
+ """Rollback consistency group base volumes to point-in-time snapshot images."""
+ group_info = self.get_consistency_group()
+ group_id = group_info["consistency_group_id"]
+
+ if self.rollback_backup:
+ self.create_pit_images()
+
+ # Ensure consistency group rollback priority is set correctly prior to rollback.
+ if self.rollback_priority:
+ try:
+ rc, resp = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, group_id), method="POST",
+ data={"rollbackPriority": self.rollback_priority})
+ except Exception as error:
+                self.module.fail_json(msg="Failed to update consistency group rollback priority!"
+ " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/startPITRollback" % self.ssid, method="POST",
+ data={"pitRef": [image["id"] for image in rollback_info["images"]]})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate rollback operations!" " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+ def complete_volume_definitions(self):
+ """Determine the complete self.volumes structure."""
+ group = self.get_consistency_group()
+
+ if not self.volumes:
+ for volume in group["base_volumes"]:
+ self.volumes.update({volume["name"]: {"reserve_capacity_pct": self.reserve_capacity_pct,
+ "preferred_reserve_storage_pool": self.preferred_reserve_storage_pool,
+ "snapshot_volume_writable": self.view_writable,
+ "snapshot_volume_validate": self.view_validate,
+ "snapshot_volume_host": self.view_host,
+ "snapshot_volume_lun": None}})
+
+ # Ensure a preferred_reserve_storage_pool has been selected
+ existing_storage_pools_by_id = self.get_all_storage_pools_by_id()
+ existing_storage_pools_by_name = self.get_all_storage_pools_by_name()
+ existing_volumes_by_name = self.get_all_volumes_by_name()
+ existing_volumes_by_id = self.get_all_volumes_by_id()
+ existing_mappings = self.get_mapping_by_id()
+ existing_host_and_hostgroup_by_id = self.get_all_hosts_and_hostgroups_by_id()
+ existing_host_and_hostgroup_by_name = self.get_all_hosts_and_hostgroups_by_name()
+ for volume_name, volume_info in self.volumes.items():
+ base_volume_storage_pool_id = existing_volumes_by_name[volume_name]["volumeGroupRef"]
+ base_volume_storage_pool_name = existing_storage_pools_by_id[base_volume_storage_pool_id]["name"]
+
+            # Check storage pool information.
+ if not volume_info["preferred_reserve_storage_pool"]:
+ volume_info["preferred_reserve_storage_pool"] = base_volume_storage_pool_name
+ elif volume_info["preferred_reserve_storage_pool"] not in existing_storage_pools_by_name.keys():
+ self.module.fail_json(msg="Preferred storage pool or volume group does not exist! Storage pool [%s]. Group [%s]."
+ " Array [%s]." % (volume_info["preferred_reserve_storage_pool"], self.group_name, self.ssid))
+
+ # Check host mapping information
+ if self.state == "present" and self.type == "view":
+ view_info = self.get_consistency_group_view()
+
+ if volume_info["snapshot_volume_host"]:
+ if volume_info["snapshot_volume_host"] not in existing_host_and_hostgroup_by_name:
+ self.module.fail_json(msg="Specified host or host group does not exist! Host [%s]. Group [%s]."
+ " Array [%s]." % (volume_info["snapshot_volume_host"], self.group_name, self.ssid))
+
+ if not volume_info["snapshot_volume_lun"]:
+ if view_info:
+ for snapshot_volume in view_info["snapshot_volumes"]:
+ if snapshot_volume["listOfMappings"]:
+ mapping = snapshot_volume["listOfMappings"][0]
+ if (volume_name == existing_volumes_by_id[snapshot_volume["baseVol"]]["name"] and
+ volume_info["snapshot_volume_host"] == existing_host_and_hostgroup_by_id[mapping["mapRef"]]["name"]):
+ volume_info["snapshot_volume_lun"] = mapping["lun"]
+ break
+ else:
+ host_id = existing_host_and_hostgroup_by_name[volume_info["snapshot_volume_host"]]["id"]
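+                            # No LUN was supplied; assign the next unused LUN (1-99) for the target host or host group.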
+ for next_lun in range(1, 100):
+
+ if host_id not in existing_mappings.keys():
+ existing_mappings.update({host_id: {}})
+
+ if next_lun not in existing_mappings[host_id].keys():
+ volume_info["snapshot_volume_lun"] = next_lun
+ existing_mappings[host_id].update({next_lun: None})
+ break
+
+ def apply(self):
+ """Apply any required snapshot state changes."""
+ changes_required = False
+ group = self.get_consistency_group()
+ group_changes = {}
+
+ # Determine which changes are required.
+ if group:
+
+ # Determine whether changes are required.
+ if self.state == "absent":
+ if self.type == "group":
+ if self.group_name:
+ changes_required = True
+ elif self.type == "pit":
+ group_changes = self.get_pit_info()
+ if group_changes:
+ changes_required = True
+ elif self.type == "view":
+ group_changes = self.get_consistency_group_view()
+ if group_changes:
+ changes_required = True
+
+ elif self.state == "present":
+ self.complete_volume_definitions()
+
+ if self.type == "group":
+ group_changes = self.update_changes_required()
+ if (group_changes["update_group"] or
+ group_changes["add_volumes"] or
+ group_changes["remove_volumes"] or
+ group_changes["expand_reserve_capacity"] or
+ group_changes["trim_reserve_capacity"]):
+ changes_required = True
+
+ elif self.type == "pit":
+ changes_required = True
+
+ elif self.type == "view":
+ if self.get_consistency_group_view():
+ group_changes = self.update_view_changes_required()
+ if (group_changes["expand_reserve_capacity"] or
+ group_changes["trim_reserve_capacity"] or
+ group_changes["map_snapshot_volumes_mapping"] or
+ group_changes["unmap_snapshot_volumes_mapping"] or
+ group_changes["move_snapshot_volumes_mapping"] or
+ group_changes["update_snapshot_volumes_writable"]):
+ changes_required = True
+ else:
+ group_changes = self.create_view_changes_required()
+ changes_required = True
+
+ elif self.state == "rollback":
+ self.complete_volume_definitions()
+ if not self.volumes:
+ for volume in group["base_volumes"]:
+ self.volumes.update({volume["name"]: None})
+ group_changes = self.rollback_changes_required()
+ if group_changes:
+ changes_required = True
+
+ else:
+ if self.state == "present":
+ if self.type == "group":
+ self.complete_volume_definitions()
+ group_changes = self.create_changes_required()
+ changes_required = True
+ elif self.type == "pit":
+ self.module.fail_json(msg="Snapshot point-in-time images cannot be taken when the snapshot consistency group does not exist!"
+ " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
+ elif self.type == "view":
+ self.module.fail_json(msg="Snapshot view cannot be created when the snapshot consistency group does not exist!"
+ " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
+ elif self.state == "rollback":
+ self.module.fail_json(msg="Rollback operation is not available when the snapshot consistency group does not exist!"
+ " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
+
+        # Determine whether any key-value pairs need to be cleaned up because snapshot pit images were deleted outside of this module.
+ unused_pit_keys = self.get_unused_pit_key()
+
+ # Apply any required changes.
+ if (changes_required or unused_pit_keys) and not self.module.check_mode:
+ if group:
+ if self.state == "absent":
+ if self.type == "group":
+ self.remove_snapshot_consistency_group(group)
+ elif self.type == "pit":
+ self.remove_pit_images(group_changes)
+ elif self.type == "view":
+ self.remove_view(group_changes["id"])
+
+ elif self.state == "present":
+
+ if self.type == "group":
+ if group_changes["update_group"]:
+ self.update_snapshot_consistency_group(group_changes["update_group"])
+ if group_changes["add_volumes"]:
+ self.add_base_volumes(group_changes["add_volumes"])
+ if group_changes["remove_volumes"]:
+ self.remove_base_volumes(group_changes["remove_volumes"])
+ if group_changes["trim_reserve_capacity"]:
+ self.trim_reserve_capacities(group_changes["trim_reserve_capacity"])
+                        if group_changes["expand_reserve_capacity"]:
+                            sleep(15)
+                            self.expand_reserve_capacities(group_changes["expand_reserve_capacity"])
+
+ elif self.type == "pit":
+ self.create_pit_images()
+
+ elif self.type == "view":
+ view = self.get_consistency_group_view()
+ if view:
+ if group_changes["trim_reserve_capacity"]:
+ self.trim_reserve_capacities(group_changes["trim_reserve_capacity"])
+                            if group_changes["expand_reserve_capacity"]:
+                                sleep(15)
+                                self.expand_reserve_capacities(group_changes["expand_reserve_capacity"])
+ if group_changes["map_snapshot_volumes_mapping"]:
+ self.map_view(group_changes["map_snapshot_volumes_mapping"])
+ if group_changes["unmap_snapshot_volumes_mapping"]:
+ self.unmap_view(group_changes["unmap_snapshot_volumes_mapping"])
+ if group_changes["move_snapshot_volumes_mapping"]:
+ self.move_view_mapping(group_changes["move_snapshot_volumes_mapping"])
+ if group_changes["update_snapshot_volumes_writable"]:
+ self.convert_view_to_writable(group_changes["update_snapshot_volumes_writable"])
+ else:
+ self.create_view(group_changes)
+
+ elif self.state == "rollback":
+ self.rollback(group_changes)
+
+ elif self.type == "group":
+ self.create_snapshot_consistency_group(group_changes["create_group"])
+ self.add_base_volumes(group_changes["add_volumes"])
+
+ if unused_pit_keys:
+                    self.cleanup_old_pit_metadata(unused_pit_keys)
+
+ self.module.exit_json(changed=changes_required, group_changes=group_changes, deleted_metadata_keys=unused_pit_keys)
+
+
+def main():
+ snapshot = NetAppESeriesSnapshot()
+ snapshot.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py
new file mode 100644
index 000000000..daf2308d7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py
@@ -0,0 +1,1057 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_storagepool
+short_description: NetApp E-Series manage volume groups and disk pools
+description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first.
+ type: str
+ choices: ["present", "absent"]
+ default: "present"
+ name:
+ description:
+ - The name of the storage pool to manage
+ type: str
+ required: true
+ criteria_drive_count:
+ description:
+ - The number of disks to use for building the storage pool.
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below)
+ type: int
+ required: false
+ criteria_min_usable_capacity:
+ description:
+ - The minimum size of the storage pool (in size_unit).
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this value exceeds its current size. (See expansion note below)
+ - Do not use when the storage system contains mixed drives and I(usable_drives) is specified since usable capacities may not be accurate.
+ type: float
+ required: false
+ criteria_drive_type:
+ description:
+ - The type of disk (hdd or ssd) to use when searching for candidates to use.
+      - When not specified, each drive type will be evaluated, starting with the most prevalent drive type, until
+        successful drive candidates are found.
+ type: str
+ choices: ["hdd","ssd"]
+ required: false
+ criteria_size_unit:
+ description:
+ - The unit used to interpret size parameters
+ type: str
+ choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
+ default: "gb"
+ required: false
+ criteria_drive_min_size:
+ description:
+ - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ type: float
+ required: false
+ criteria_drive_max_size:
+ description:
+ - The maximum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ type: float
+ required: false
+ criteria_drive_interface_type:
+ description:
+ - The interface type to use when selecting drives for the storage pool
+ - If not provided then all interface types will be considered.
+ type: str
+ choices: ["scsi", "fibre", "sata", "pata", "fibre520b", "sas", "sas4k", "nvme4k"]
+ required: false
+ criteria_drive_require_da:
+ description:
+ - Ensures the storage pool will be created with only data assurance (DA) capable drives.
+ - Only available for new storage pools; existing storage pools cannot be converted.
+ type: bool
+ default: false
+ required: false
+ criteria_drive_require_fde:
+ description:
+ - Whether full disk encryption ability is required for drives to be added to the storage pool
+ type: bool
+ default: false
+ required: false
+ usable_drives:
+ description:
+ - Ordered comma-separated list of tray/drive slots to be selected for drive candidates (drives that are used will be skipped).
+ - Each drive entry is represented as <tray_number>:<(optional) drawer_number>:<drive_slot_number> (e.g. 99:0 is the base tray's drive slot 0).
+ - The base tray's default identifier is 99 and expansion trays are added in the order they are attached but these identifiers can be changed by the user.
+      - Be aware that trays with multiple drawers still have a dedicated drive slot for all drives and the slot number does not depend on the drawer; however,
+        if you're planning to have drawer protection you need to order the drives accordingly.
+ - When I(usable_drives) are not provided then the drive candidates will be selected by the storage system.
+ type: str
+ required: false
+ raid_level:
+ description:
+ - The RAID level of the storage pool to be created.
+ - Required only when I(state=="present").
+ - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
+ depending on the storage array specifications.
+ - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
+ - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required.
+ - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
+ - Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
+ type: str
+ default: "raidDiskPool"
+ choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
+ required: false
+ secure_pool:
+ description:
+ - Enables security at rest feature on the storage pool.
+ - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix)
+ - Warning, once security is enabled it is impossible to disable without erasing the drives.
+ type: bool
+ required: false
+ reserve_drive_count:
+ description:
+ - Set the number of drives reserved by the storage pool for reconstruction operations.
+ - Only valid on raid disk pools.
+ type: int
+ required: false
+ remove_volumes:
+ description:
+ - Prior to removing a storage pool, delete all volumes in the pool.
+ type: bool
+ default: true
+ required: false
+ erase_secured_drives:
+ description:
+ - If I(state=="absent") then all storage pool drives will be erased.
+ - If I(state=="present") then delete all available storage array drives that have security enabled.
+ type: bool
+ default: true
+ required: false
+ ddp_critical_threshold_pct:
+ description:
+ - Issues a critical alert when this percentage of the storage pool has been allocated.
+ - Only applicable when I(raid_level=="raidDiskPool").
+ - Set I(ddp_critical_threshold_pct==0) to disable alert.
+ type: int
+ default: 85
+ required: false
+ ddp_warning_threshold_pct:
+ description:
+ - Issues a warning alert when this percentage of the storage pool has been allocated.
+ - Only applicable when I(raid_level=="raidDiskPool").
+ - Set I(ddp_warning_threshold_pct==0) to disable alert.
+ type: int
+ default: 85
+ required: false
+notes:
+ - The expansion operations are non-blocking due to the time-consuming nature of expanding volume groups.
+ - Expansions of traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each
+ required step will be attempted until the request fails, which is likely because of the required expansion time.
+ - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
+ - Tray loss protection and drawer loss protection will be chosen if at all possible.
+"""
+EXAMPLES = """
+- name: No disk groups
+ na_santricity_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
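+
+# The examples below are an editor's illustrative sketch rather than part of the original module;
+# they use only options documented above, and all names and drive locations are hypothetical.
+- name: Create a RAID 6 volume group from any six available HDDs
+ na_santricity_storagepool:
+ ssid: "{{ ssid }}"
+ name: example_raid6_group
+ state: present
+ raid_level: raid6
+ criteria_drive_count: 6
+ criteria_drive_type: hdd
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+- name: Create a dynamic disk pool from specific base-tray slots with custom alert thresholds
+ na_santricity_storagepool:
+ ssid: "{{ ssid }}"
+ name: example_disk_pool
+ state: present
+ raid_level: raidDiskPool
+ criteria_drive_count: 11
+ usable_drives: "99:0,99:1,99:2,99:3,99:4,99:5,99:6,99:7,99:8,99:9,99:10"
+ ddp_critical_threshold_pct: 85
+ ddp_warning_threshold_pct: 65
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"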
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: JSON facts for the pool that was created.
+"""
+import functools
+from itertools import groupby
+from time import sleep
+
+from pprint import pformat
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
+def get_most_common_elements(iterator):
+ """Returns a generator containing a descending list of most common elements."""
+ if not isinstance(iterator, list):
+ raise TypeError("iterator must be a list.")
+
+ grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))]
+ return sorted(grouped, key=lambda x: x[1], reverse=True)
+
+
+def memoize(func):
+ """Generic memoizer for any function with any number of arguments including zero."""
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ class MemoizeFuncArgs(dict):
+ def __missing__(self, _key):
+ self[_key] = func(*args, **kwargs)
+ return self[_key]
+
+ key = str((args, kwargs)) if args and kwargs else "no_argument_response"
+ return MemoizeFuncArgs().__getitem__(key)
+
+ return wrapper
+
+
+class NetAppESeriesStoragePool(NetAppESeriesModule):
+ EXPANSION_TIMEOUT_SEC = 10
+ DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(
+ state=dict(choices=["present", "absent"], default="present", type="str"),
+ name=dict(required=True, type="str"),
+ criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
+ default="gb", type="str"),
+ criteria_drive_count=dict(type="int"),
+ criteria_drive_interface_type=dict(choices=["scsi", "fibre", "sata", "pata", "fibre520b", "sas", "sas4k", "nvme4k"], type="str"),
+ criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False),
+ criteria_drive_min_size=dict(type="float"),
+ criteria_drive_max_size=dict(type="float"),
+ criteria_drive_require_da=dict(type="bool", required=False),
+ criteria_drive_require_fde=dict(type="bool", required=False),
+ criteria_min_usable_capacity=dict(type="float"),
+ usable_drives=dict(type="str", required=False),
+ raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"],
+ default="raidDiskPool"),
+ erase_secured_drives=dict(type="bool", default=True),
+ secure_pool=dict(type="bool", default=False),
+ reserve_drive_count=dict(type="int"),
+ remove_volumes=dict(type="bool", default=True),
+ ddp_critical_threshold_pct=dict(type="int", default=85, required=False),
+ ddp_warning_threshold_pct=dict(type="int", default=0, required=False))
+
+ required_if = [["state", "present", ["raid_level"]]]
+ super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options,
+ web_services_version=version,
+ supports_check_mode=True,
+ required_if=required_if)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.ssid = args["ssid"]
+ self.name = args["name"]
+ self.criteria_drive_count = args["criteria_drive_count"]
+ self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"]
+ self.criteria_size_unit = args["criteria_size_unit"]
+ self.criteria_drive_min_size = args["criteria_drive_min_size"]
+ self.criteria_drive_max_size = args["criteria_drive_max_size"]
+ self.criteria_drive_type = args["criteria_drive_type"]
+ self.criteria_drive_interface_type = args["criteria_drive_interface_type"]
+ self.criteria_drive_require_fde = args["criteria_drive_require_fde"]
+ self.criteria_drive_require_da = args["criteria_drive_require_da"]
+ self.raid_level = args["raid_level"]
+ self.erase_secured_drives = args["erase_secured_drives"]
+ self.secure_pool = args["secure_pool"]
+ self.reserve_drive_count = args["reserve_drive_count"]
+ self.remove_volumes = args["remove_volumes"]
+ self.ddp_critical_threshold_pct = args["ddp_critical_threshold_pct"]
+ self.ddp_warning_threshold_pct = args["ddp_warning_threshold_pct"]
+ self.pool_detail = None
+
+ if self.ddp_critical_threshold_pct < 0 or self.ddp_critical_threshold_pct > 100:
+ self.module.fail_json(msg="Invalid I(ddp_critical_threshold_pct) value! Must between or equal to 0 and 100. Array [%s]" % self.ssid)
+ if self.ddp_warning_threshold_pct < 0 or self.ddp_warning_threshold_pct > 100:
+ self.module.fail_json(msg="Invalid I(ddp_warning_threshold_pct) value! Must between or equal to 0 and 100. Array [%s]" % self.ssid)
+
+ # Change all sizes to be measured in bytes
+ if self.criteria_min_usable_capacity:
+ self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity * self.SIZE_UNIT_MAP[self.criteria_size_unit])
+ if self.criteria_drive_min_size:
+ self.criteria_drive_min_size = int(self.criteria_drive_min_size * self.SIZE_UNIT_MAP[self.criteria_size_unit])
+ if self.criteria_drive_max_size:
+ self.criteria_drive_max_size = int(self.criteria_drive_max_size * self.SIZE_UNIT_MAP[self.criteria_size_unit])
+ self.criteria_size_unit = "bytes"
+
+ # Adjust unused raid level option to reflect documentation
+ if self.raid_level == "raidAll":
+ self.raid_level = "raidDiskPool"
+ if self.raid_level == "raid3":
+ self.raid_level = "raid5"
+
+ # Parse usable drive string into tray:slot list
+ self.usable_drives = []
+ if args["usable_drives"]:
+ for usable_drive in args["usable_drives"].split(","):
+ location = [int(item) for item in usable_drive.split(":")]
+ if len(location) == 2:
+ tray, slot = location
+ self.usable_drives.append([tray, 0, slot + 1]) # slot must be one-indexed instead of zero.
+ elif len(location) == 3:
+ tray, drawer, slot = location
+ self.usable_drives.append([tray, drawer - 1, slot + 1]) # slot must be one-indexed instead of zero.
+ else:
+ self.module.fail_json(msg="Invalid I(usable_drives) value! Must be a comma-separated list of <TRAY_NUMBER>:<DRIVE_SLOT_NUMBER> entries."
+ " Array [%s]." % self.ssid)
+
+ @property
+ @memoize
+ def available_drives(self):
+ """Determine the list of available drives"""
+ return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"]
+
+ @property
+ @memoize
+ def available_drive_types(self):
+ """Determine the types of available drives sorted by the most common first."""
+ types = [drive["driveMediaType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(types)]
+
+ @property
+ @memoize
+ def available_drive_interface_types(self):
+ """Determine the types of available drives."""
+ interfaces = [drive["phyDriveType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(interfaces)]
+
+ @property
+ def storage_pool_drives(self):
+ """Retrieve list of drives found in storage pool."""
+ return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
+
+ @property
+ def expandable_drive_count(self):
+ """Maximum number of drives that a storage pool can be expended at a given time."""
+ capabilities = None
+ if self.raid_level == "raidDiskPool":
+ return len(self.available_drives)
+
+ try:
+ rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return capabilities["featureParameters"]["maxDCEDrives"]
+
+ @property
+ def disk_pool_drive_minimum(self):
+ """Provide the storage array's minimum disk pool drive count."""
+ rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)
+
+ # Standard minimum is 11 drives but some systems allow 10 drives. 11 will be used as the default.
+ if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
+ attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
+ return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
+
+ return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+ def get_available_drive_capacities(self, drive_id_list=None):
+ """Determine the list of available drive capacities."""
+ if drive_id_list:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["id"] in drive_id_list and drive["available"] and
+ drive["status"] == "optimal"])
+ else:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["available"] and drive["status"] == "optimal"])
+
+ self.module.log("available drive capacities: %s" % available_drive_capacities)
+ return list(available_drive_capacities)
+
+ @property
+ def drives(self):
+ """Retrieve list of drives found in storage system."""
+ drives = None
+ try:
+ rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return drives
+
+ def tray_by_ids(self):
+ """Retrieve list of trays found in storage system and return dictionary of trays keyed by ids."""
+ tray_by_ids = {}
+ try:
+ rc, inventory = self.request("storage-systems/%s/hardware-inventory" % self.ssid)
+ for tray in inventory["trays"]:
+ tray_by_ids.update({tray["trayRef"]: {"tray_number": tray["trayId"],
+ "drawer_count": tray["driveLayout"]["numRows"] * tray["driveLayout"]["numColumns"]}})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch trays. Array id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return tray_by_ids
+
+ def convert_drives_list_into_drive_info_by_ids(self):
+ """Determine drive identifiers base on provided drive list. Provide usable_ids list to select subset."""
+ tray_by_ids = self.tray_by_ids()
+
+ drives = []
+ for usable_drive in self.usable_drives:
+ tray, drawer, slot = usable_drive
+ for drive in self.drives:
+ drawer_slot = drawer * tray_by_ids[drive["physicalLocation"]["trayRef"]]["drawer_count"] + slot
+ if drawer_slot == drive["physicalLocation"]["slot"] and tray == tray_by_ids[drive["physicalLocation"]["trayRef"]]["tray_number"]:
+ if drive["available"]:
+ drives.append(drive["id"])
+ break
+
+ return drives
+
+ def is_drive_count_valid(self, drive_count):
+ """Validate drive count criteria is met."""
+ if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+ return False
+
+ if self.raid_level == "raidDiskPool":
+ return drive_count >= self.disk_pool_drive_minimum
+ if self.raid_level == "raid0":
+ return drive_count > 0
+ if self.raid_level == "raid1":
+ return drive_count >= 2 and (drive_count % 2) == 0
+ if self.raid_level in ["raid3", "raid5"]:
+ return 3 <= drive_count <= 30
+ if self.raid_level == "raid6":
+ return 5 <= drive_count <= 30
+ return False
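+ # Editor's note on the checks above (illustrative): raid1 with a drive count of 5 is rejected
+ # because the count must be even, while raid6 with 5 drives satisfies 5 <= count <= 30.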
+
+ @property
+ def storage_pool(self):
+ """Retrieve storage pool information."""
+ storage_pools_resp = None
+ try:
+ rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error [%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+ return pool_detail[0] if pool_detail else dict()
+
+ @property
+ def storage_pool_volumes(self):
+ """Retrieve list of volumes associated with storage pool."""
+ volumes_resp = None
+ try:
+ rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error [%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ group_ref = self.storage_pool["volumeGroupRef"]
+ storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
+ return storage_pool_volume_list
+
+ def get_ddp_capacity(self, expansion_drive_list):
+ """Return the total usable capacity based on the additional drives."""
+
+ def get_ddp_error_percent(_drive_count, _extent_count):
+ """Determine the space reserved for reconstruction"""
+ if _drive_count <= 36:
+ if _extent_count <= 600:
+ return 0.40
+ elif _extent_count <= 1400:
+ return 0.35
+ elif _extent_count <= 6200:
+ return 0.20
+ elif _extent_count <= 50000:
+ return 0.15
+ elif _drive_count <= 64:
+ if _extent_count <= 600:
+ return 0.20
+ elif _extent_count <= 1400:
+ return 0.15
+ elif _extent_count <= 6200:
+ return 0.10
+ elif _extent_count <= 50000:
+ return 0.05
+ elif _drive_count <= 480:
+ if _extent_count <= 600:
+ return 0.20
+ elif _extent_count <= 1400:
+ return 0.15
+ elif _extent_count <= 6200:
+ return 0.10
+ elif _extent_count <= 50000:
+ return 0.05
+
+ self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)
+
+ def get_ddp_reserved_drive_count(_disk_count):
+ """Determine the number of reserved drive."""
+ reserve_count = 0
+
+ if self.reserve_drive_count:
+ reserve_count = self.reserve_drive_count
+ elif _disk_count >= 256:
+ reserve_count = 8
+ elif _disk_count >= 192:
+ reserve_count = 7
+ elif _disk_count >= 128:
+ reserve_count = 6
+ elif _disk_count >= 64:
+ reserve_count = 4
+ elif _disk_count >= 32:
+ reserve_count = 3
+ elif _disk_count >= 12:
+ reserve_count = 2
+ elif _disk_count == 11:
+ reserve_count = 1
+
+ return reserve_count
+
+ if self.pool_detail:
+ drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
+ else:
+ drive_count = len(expansion_drive_list)
+
+ drive_usable_capacity = min(min(self.get_available_drive_capacities()),
+ min(self.get_available_drive_capacities(expansion_drive_list)))
+ drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
+ maximum_stripe_count = (drive_count * drive_data_extents) / 10
+
+ error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
+ error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10
+
+ total_stripe_count = maximum_stripe_count - error_overhead
+ stripe_count_per_drive = total_stripe_count / drive_count
+ reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
+ available_stripe_count = total_stripe_count - reserved_stripe_count
+
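+ # Rough worked example (editor's addition; drive sizes are hypothetical): a new pool of 12 drives,
+ # each with about 4 TB (4,000,787,030,016 bytes) of usable capacity, gives roughly 7437 data extents
+ # per drive, a maximum stripe count of about 8924, an error overhead of about 135 stripes (the 15%
+ # band above), about 1465 stripes reserved for the 2 reconstruction drives, and therefore roughly
+ # 7325 * 4294967296 bytes, or about 31 TB, of usable capacity.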
+ return available_stripe_count * 4294967296
+
+ def get_candidate_drive_request(self):
+ """Perform request for new volume creation."""
+
+ candidates_list = list()
+ drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
+ interface_types = [self.criteria_drive_interface_type] \
+ if self.criteria_drive_interface_type else self.available_drive_interface_types
+
+ for interface_type in interface_types:
+ for drive_type in drive_types:
+ candidates = None
+ volume_candidate_request_data = dict(
+ type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
+ diskPoolVolumeCandidateRequestData=dict(
+ reconstructionReservedDriveCount=65535))
+ candidate_selection_type = dict(
+ candidateSelectionType="count",
+ driveRefList=dict(driveRef=self.available_drives))
+ criteria = dict(raidLevel=self.raid_level,
+ phyDriveType=interface_type,
+ dssPreallocEnabled=False,
+ securityType="capable" if self.criteria_drive_require_fde else "none",
+ driveMediaType=drive_type,
+ onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
+ volumeCandidateRequestData=volume_candidate_request_data,
+ allocateReserveSpace=False,
+ securityLevel="fde" if self.criteria_drive_require_fde else "none",
+ candidateSelectionType=candidate_selection_type)
+
+ try:
+ rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
+ "Response=true" % self.ssid, data=criteria, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ if candidates:
+ candidates_list.extend(candidates["volumeCandidate"])
+
+ if candidates_list and not self.usable_drives:
+ def candidate_sort_function(entry):
+ """Orders candidates based on tray/drawer loss protection."""
+ preference = 3
+ if entry["drawerLossProtection"]:
+ preference -= 1
+ if entry["trayLossProtection"]:
+ preference -= 2
+ return preference
+
+ candidates_list.sort(key=candidate_sort_function)
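+ # Editor's note (illustrative): with the sort above, candidates providing both drawer and tray loss
+ # protection order first (preference 0), then tray-only (1), drawer-only (2), and unprotected (3).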
+
+ # Replace drive selection with required usable drives
+ if self.usable_drives:
+ drives = self.convert_drives_list_into_drive_info_by_ids()
+ for candidates in candidates_list:
+ candidates["driveRefList"].update({"driveRef": drives[0:candidates["driveCount"]]})
+
+ return candidates_list
+
+ @memoize
+ def get_candidate_drives(self):
+ """Retrieve set of drives candidates for creating a new storage pool."""
+ for candidate in self.get_candidate_drive_request():
+
+ # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
+ if self.criteria_drive_count:
+ if self.criteria_drive_count != int(candidate["driveCount"]):
+ continue
+ if self.criteria_min_usable_capacity:
+ if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
+ self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
+ self.criteria_min_usable_capacity > int(candidate["usableSize"])):
+ continue
+ if self.criteria_drive_min_size:
+ if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
+ continue
+ if self.criteria_drive_max_size:
+ if self.criteria_drive_max_size < min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
+ continue
+
+ return candidate
+
+ self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
+ @memoize
+ def get_expansion_candidate_drives(self):
+ """Retrieve required expansion drive list.
+
+ Note: To satisfy the expansion criteria, each item in the candidate list must be added to the specified group since there
+ is a potential limitation on how many drives can be incorporated at a time.
+ * Traditional raid volume groups can only be expanded by a maximum of two drives at a time. There are no limits on raid disk pools.
+
+ :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
+ """
+
+ def get_expansion_candidate_drive_request():
+ """Perform the request for expanding existing volume groups or disk pools.
+
+ Note: the list of candidate structures does not necessarily produce candidates that meet all criteria.
+ """
+ candidates_list = None
+ url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid
+ if self.raid_level == "raidDiskPool":
+ url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid
+
+ try:
+ rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"])
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return candidates_list["candidates"]
+
+ required_candidate_list = list()
+ required_additional_drives = 0
+ required_additional_capacity = 0
+ total_required_capacity = 0
+
+ # Determine whether and how much expansion is needed to satisfy the specified criteria
+ if self.criteria_min_usable_capacity:
+ total_required_capacity = self.criteria_min_usable_capacity
+ required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])
+
+ if self.criteria_drive_count:
+ required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)
+
+ # Determine the appropriate expansion candidate list
+ if required_additional_drives > 0 or required_additional_capacity > 0:
+ for candidate in get_expansion_candidate_drive_request():
+
+ if self.criteria_drive_min_size:
+ if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
+ continue
+ if self.criteria_drive_max_size:
+ if self.criteria_drive_max_size < min(self.get_available_drive_capacities(candidate["drives"])):
+ continue
+
+ if self.raid_level == "raidDiskPool":
+ if (len(candidate["drives"]) >= required_additional_drives and
+ self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
+ required_candidate_list.append(candidate)
+ break
+ else:
+ required_additional_drives -= len(candidate["drives"])
+ required_additional_capacity -= int(candidate["usableCapacity"])
+ required_candidate_list.append(candidate)
+
+ # Determine if required drives and capacities are satisfied
+ if required_additional_drives <= 0 and required_additional_capacity <= 0:
+ break
+ else:
+ self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
+ return required_candidate_list
+
+ def get_reserve_drive_count(self):
+ """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool)."""
+
+ if not self.pool_detail:
+ self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)
+
+ if self.raid_level != "raidDiskPool":
+ self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+ % (self.pool_detail["id"], self.ssid))
+
+ return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]
+
+ def get_maximum_reserve_drive_count(self):
+ """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool)."""
+ if self.raid_level != "raidDiskPool":
+ self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+ % (self.pool_detail["id"], self.ssid))
+
+ drives_ids = list()
+
+ if self.pool_detail:
+ drives_ids.extend(self.storage_pool_drives)
+ for candidate in self.get_expansion_candidate_drives():
+ drives_ids.extend((candidate["drives"]))
+ else:
+ candidate = self.get_candidate_drives()
+ drives_ids.extend(candidate["driveRefList"]["driveRef"])
+
+ drive_count = len(drives_ids)
+ maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
+ if maximum_reserve_drive_count > 10:
+ maximum_reserve_drive_count = 10
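+ # Editor's note (illustrative): 14 total drives give min(int(14 * 0.2 + 1), 14 - 10) = 3 reserve
+ # drives, while 60 total drives give min(13, 50) = 13, which the check above clamps to 10.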
+
+ return maximum_reserve_drive_count
+
+ def set_reserve_drive_count(self, check_mode=False):
+ """Set the reserve drive count for raidDiskPool."""
+ changed = False
+
+ if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
+ maximum_count = self.get_maximum_reserve_drive_count()
+
+ if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
+ self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
+ "Note that it may be necessary to wait for expansion operations to complete "
+ "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]."
+ % (maximum_count, self.ssid))
+
+ if self.reserve_drive_count != self.get_reserve_drive_count():
+ changed = True
+
+ if not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
+ method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
+ newDriveCount=self.reserve_drive_count))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
+ " Array [%s]." % (self.pool_detail["id"], self.ssid))
+
+ return changed
+
+ def erase_all_available_secured_drives(self, check_mode=False):
+ """Erase all available drives that have encryption at rest feature enabled."""
+ changed = False
+ drives_list = list()
+ for drive in self.drives:
+ if drive["available"] and drive["fdeEnabled"]:
+ changed = True
+ drives_list.append(drive["id"])
+
+ if drives_list and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+ % self.ssid, method="POST", data=dict(driveRef=drives_list))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid)
+
+ return changed
+
+ def create_storage_pool(self):
+ """Create new storage pool."""
+ url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid
+ request_body = dict(label=self.name,
+ candidate=self.get_candidate_drives())
+
+ if self.raid_level == "raidDiskPool":
+ url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid
+
+ request_body.update(
+ dict(backgroundOperationPriority="useDefault",
+ criticalReconstructPriority="useDefault",
+ degradedReconstructPriority="useDefault",
+ poolUtilizationCriticalThreshold=self.ddp_critical_threshold_pct,
+ poolUtilizationWarningThreshold=self.ddp_warning_threshold_pct))
+
+ if self.reserve_drive_count:
+ request_body.update(dict(volumeCandidateData=dict(
+ diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count))))
+
+ try:
+ rc, resp = self.request(url, method="POST", data=request_body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ # Update drive and storage pool information
+ self.pool_detail = self.storage_pool
+
+ def delete_storage_pool(self):
+ """Delete storage pool."""
+ storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]]
+ try:
+ delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else ""
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s%s"
+ % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error [%s]."
+ % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ if storage_pool_drives and self.erase_secured_drives:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+ % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]."
+ " Error [%s]." % (self.ssid, to_native(error)))
+
+ def secure_storage_pool(self, check_mode=False):
+ """Enable security on an existing storage pool"""
+ self.pool_detail = self.storage_pool
+ needs_secure_pool = False
+
+ if not self.secure_pool and self.pool_detail["securityType"] == "enabled":
+ self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.")
+ if self.secure_pool and self.pool_detail["securityType"] != "enabled":
+ needs_secure_pool = True
+
+ if needs_secure_pool and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]),
+ data=dict(securePool=True), method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error"
+ " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_secure_pool
+
+ def migrate_raid_level(self, check_mode=False):
+ """Request storage pool raid level migration."""
+ needs_migration = self.raid_level != self.pool_detail["raidLevel"]
+ if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool":
+ self.module.fail_json(msg="Raid level cannot be changed for disk pools")
+
+ if needs_migration and not check_mode:
+ sp_raid_migrate_req = dict(raidLevel=self.raid_level)
+
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration"
+ % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]."
+ " Error [%s]." % (self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_migration
+
+ def update_ddp_settings(self, check_mode=False):
+ """Update dynamic disk pool settings."""
+ if self.raid_level != "raidDiskPool":
+ return False
+
+ needs_update = False
+ if (self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationWarningThreshold"] != self.ddp_warning_threshold_pct or
+ self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationCriticalThreshold"] != self.ddp_critical_threshold_pct):
+ needs_update = True
+
+ if needs_update and not check_mode:
+ if self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationWarningThreshold"] != self.ddp_warning_threshold_pct:
+ try:
+ rc, update = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), method="POST",
+ data={"id": self.pool_detail["id"],
+ "poolThreshold": {"thresholdType": "warning", "value": self.ddp_warning_threshold_pct}})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update DDP warning alert threshold! Pool [%s]. Array [%s]."
+ " Error [%s]" % (self.name, self.ssid, to_native(error)))
+
+ if self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationCriticalThreshold"] != self.ddp_critical_threshold_pct:
+ try:
+ rc, update = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), method="POST",
+ data={"id": self.pool_detail["id"],
+ "poolThreshold": {"thresholdType": "critical", "value": self.ddp_critical_threshold_pct}})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update DDP critical alert threshold! Pool [%s]. Array [%s]."
+ " Error [%s]" % (self.name, self.ssid, to_native(error)))
+ return needs_update
+
+ def expand_storage_pool(self, check_mode=False):
+ """Add drives to existing storage pool.
+
+ :return bool: whether drives were required to be added to satisfy the specified criteria."""
+ expansion_candidate_list = self.get_expansion_candidate_drives()
+ changed_required = bool(expansion_candidate_list)
+ estimated_completion_time = 0.0
+
+ # Build expandable groupings of traditional raid candidates
+ required_expansion_candidate_list = list()
+ while expansion_candidate_list:
+ subset = list()
+ while expansion_candidate_list and len(subset) < self.expandable_drive_count:
+ subset.extend(expansion_candidate_list.pop()["drives"])
+ required_expansion_candidate_list.append(subset)
+
+ if required_expansion_candidate_list and not check_mode:
+ url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
+ if self.raid_level == "raidDiskPool":
+ url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid
+
+ while required_expansion_candidate_list:
+ candidate_drives_list = required_expansion_candidate_list.pop()
+ request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
+ driveRef=candidate_drives_list)
+ try:
+ rc, resp = self.request(url, method="POST", data=request_body)
+ except Exception as error:
+ rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
+ % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
+ if rc == 200 and actions_resp:
+ actions = [action["currentAction"] for action in actions_resp
+ if action["volumeRef"] in self.storage_pool_volumes]
+ self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
+ " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error [%s]."
+ % (", ".join(actions), self.pool_detail["id"], self.ssid,
+ to_native(error)))
+
+ self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]."
+ " Error [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ # Wait for expansion completion unless it is the last request in the candidate list
+ if required_expansion_candidate_list:
+ for dummy in range(self.EXPANSION_TIMEOUT_SEC):
+ rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
+ % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
+ if rc == 200:
+ for action in actions_resp:
+ if (action["volumeRef"] in self.storage_pool_volumes and
+ action["currentAction"] == "remappingDce"):
+ sleep(1)
+ estimated_completion_time = action["estimatedTimeToCompletion"]
+ break
+ else:
+ estimated_completion_time = 0.0
+ break
+
+ return changed_required, estimated_completion_time
+
+ def apply(self):
+ """Apply requested state to storage array."""
+ changed = False
+
+ if self.state == "present":
+ if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None:
+ self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be"
+ " specified.")
+ if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count):
+ self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.")
+
+ self.pool_detail = self.storage_pool
+ self.module.log(pformat(self.pool_detail))
+
+ if self.state == "present" and self.erase_secured_drives:
+ self.erase_all_available_secured_drives(check_mode=True)
+
+ # Determine whether changes need to be applied to the storage array
+ if self.pool_detail:
+
+ if self.state == "absent":
+ changed = True
+
+ elif self.state == "present":
+
+ if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives):
+ self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]."
+ % (self.ssid, self.pool_detail["id"]))
+
+ if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]:
+ self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type."
+ " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"]))
+
+ if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da !=
+ self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]):
+ self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]."
+ " Pool [%s]." % (self.ssid, self.pool_detail["id"]))
+
+ # Evaluate current storage pool for required change.
+ needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True)
+ if needs_expansion:
+ changed = True
+ if self.migrate_raid_level(check_mode=True):
+ changed = True
+ if self.secure_storage_pool(check_mode=True):
+ changed = True
+ if self.set_reserve_drive_count(check_mode=True):
+ changed = True
+ if self.update_ddp_settings(check_mode=True):
+ changed = True
+
+ elif self.state == "present":
+ changed = True
+
+ # Apply changes to storage array
+ msg = "No changes were required for the storage pool [%s]."
+ if changed and not self.module.check_mode:
+ if self.state == "present":
+ if self.erase_secured_drives:
+ self.erase_all_available_secured_drives()
+
+ if self.pool_detail:
+ change_list = list()
+
+ # Expansion needs to occur before raid level migration to account for any sizing needs.
+ expanded, estimated_completion_time = self.expand_storage_pool()
+ if expanded:
+ change_list.append("expanded")
+ if self.migrate_raid_level():
+ change_list.append("raid migration")
+ if self.secure_storage_pool():
+ change_list.append("secured")
+ if self.set_reserve_drive_count():
+ change_list.append("adjusted reserve drive count")
+
+ if self.update_ddp_settings():
+ change_list.append("updated ddp settings")
+
+ if change_list:
+ msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)
+
+ if expanded:
+ msg += "\nThe expansion operation will complete in an estimated %s minutes." % estimated_completion_time
+ else:
+ self.create_storage_pool()
+ msg = "Storage pool [%s] was created."
+
+ if self.secure_storage_pool():
+ msg = "Storage pool [%s] was created and secured."
+ if self.set_reserve_drive_count():
+ msg += " Adjusted reserve drive count."
+
+ elif self.pool_detail:
+ self.delete_storage_pool()
+ msg = "Storage pool [%s] removed."
+
+ self.pool_detail = self.storage_pool
+ self.module.log(pformat(self.pool_detail))
+ self.module.log(msg % self.name)
+ self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
+
+
+def main():
+ storage_pool = NetAppESeriesStoragePool()
+ storage_pool.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py
new file mode 100644
index 000000000..212957ead
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_syslog
+short_description: NetApp E-Series manage syslog settings
+description:
+ - Allow the syslog settings to be configured for an individual E-Series storage-system
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Add or remove the syslog server configuration for E-Series storage array.
+ - Existing syslog server configuration will be removed or updated when its address matches I(address).
+ - A fully qualified hostname that resolves to an IPv4 address matching I(address) will not be
+ treated as a match.
+ type: str
+ choices:
+ - present
+ - absent
+ default: present
+ required: false
+ address:
+ description:
+ - The syslog server's IPv4 address or a fully qualified hostname.
+ - All existing syslog configurations will be removed when I(state=absent) and I(address=None).
+ type: str
+ required: false
+ port:
+ description:
+ - This is the port the syslog server is using.
+ type: int
+ default: 514
+ required: false
+ protocol:
+ description:
+ - This is the transmission protocol the syslog server uses to receive syslog messages.
+ type: str
+ default: udp
+ choices:
+ - udp
+ - tcp
+ - tls
+ required: false
+ components:
+ description:
+ - The E-Series logging components define the specific logs to transfer to the syslog server.
+ - At the time of writing, 'auditLog' is the only logging component but more may become available.
+ type: list
+ default: ["auditLog"]
+ required: false
+ test:
+ description:
+ - This forces a test syslog message to be sent to the stated syslog server.
+ - Only attempts transmission when I(state=present).
+ type: bool
+ default: false
+ required: false
+notes:
+ - Check mode is supported.
+ - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+ SANtricity OS 11.40.2) and higher.
+"""
+
+EXAMPLES = """
+ - name: Add two syslog server configurations to NetApp E-Series storage array.
+ na_santricity_syslog:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ address: "{{ item }}"
+ port: 514
+ protocol: tcp
+ components: ["auditLog"]
+ loop:
+ - "192.168.1.1"
+ - "192.168.1.100"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+syslog:
+ description:
+ - True if syslog server configuration has been added to e-series storage array.
+ returned: on success
+ sample: True
+ type: bool
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesSyslog(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(
+ state=dict(choices=["present", "absent"], required=False, default="present"),
+ address=dict(type="str", required=False),
+ port=dict(type="int", default=514, required=False),
+ protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
+ components=dict(type="list", required=False, default=["auditLog"]),
+ test=dict(type="bool", default=False, required=False))
+
+ required_if = [["state", "present", ["address", "port", "protocol", "components"]]]
+ mutually_exclusive = [["test", "absent"]]
+ super(NetAppESeriesSyslog, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True)
+ args = self.module.params
+
+ self.syslog = args["state"] in ["present"]
+ self.address = args["address"]
+ self.port = args["port"]
+ self.protocol = args["protocol"]
+ self.components = args["components"]
+ self.test = args["test"]
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.creds = dict(url_password=args["api_password"],
+ validate_certs=args["validate_certs"],
+ url_username=args["api_username"], )
+
+ self.components.sort()
+ self.check_mode = self.module.check_mode
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
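+ # Editor's note (illustrative): for a proxy-managed array with ssid "1", a request path such as
+ # "storage-systems/1/syslog" becomes "storage-systems/1/forward/devmgr/v2/storage-systems/1/syslog".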
+
+ def get_configuration(self):
+ """Retrieve existing syslog configuration."""
+ try:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog" % self.ssid)
+ return result
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def test_configuration(self, body):
+ """Send test syslog message to the storage array.
+
+ Allows a fixed number of retries to occur before a failure is issued, giving the storage array time to create
+ the new syslog server record.
+ """
+ try:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s/test" % (self.ssid, body["id"]), method='POST')
+ except Exception as err:
+ self.module.fail_json(msg="We failed to send test message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update_configuration(self):
+ """Post the syslog request to array."""
+ config_match = None
+ perfect_match = None
+ update = False
+ body = dict()
+
+ # search existing configuration for syslog server entry match
+ configs = self.get_configuration()
+ if self.address:
+ for config in configs:
+ if config["serverAddress"] == self.address:
+ config_match = config
+ if (config["port"] == self.port and config["protocol"] == self.protocol and
+ len(config["components"]) == len(self.components) and
+ all([component["type"] in self.components for component in config["components"]])):
+ perfect_match = config_match
+ break
+
+ # generate body for the http request
+ if self.syslog:
+ if not perfect_match:
+ update = True
+ if config_match:
+ body.update(dict(id=config_match["id"]))
+ components = [dict(type=component_type) for component_type in self.components]
+ body.update(dict(serverAddress=self.address, port=self.port,
+ protocol=self.protocol, components=components))
+ self.make_configuration_request(body)
+
+ elif config_match:
+
+ # remove specific syslog server configuration
+ if self.address:
+ update = True
+ body.update(dict(id=config_match["id"]))
+ self.make_configuration_request(body)
+
+ # if no address is specified, remove all syslog server configurations
+ elif configs:
+ update = True
+ for config in configs:
+ body.update(dict(id=config["id"]))
+ self.make_configuration_request(body)
+
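+ # Editor's note (illustrative): an existing entry whose address matches but whose port, protocol,
+ # or components differ yields config_match without perfect_match, so the entry is updated in place.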
+ return update
+
+ def make_configuration_request(self, body):
+ # make http request(s)
+ if not self.check_mode:
+ try:
+ if self.syslog:
+ if "id" in body:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s" % (self.ssid, body["id"]),
+ method='POST', data=body)
+ else:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog" % self.ssid, method='POST', data=body)
+ body.update(result)
+
+ # send syslog test message
+ if self.test:
+ self.test_configuration(body)
+
+ elif "id" in body:
+ rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s" % (self.ssid, body["id"]), method='DELETE')
+
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update(self):
+ """Update configuration and respond to ansible."""
+ update = self.update_configuration()
+ self.module.exit_json(msg="The syslog settings have been updated.", changed=update)
+
+
+def main():
+ settings = NetAppESeriesSyslog()
+ settings.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py
new file mode 100644
index 000000000..3a3552ff3
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py
@@ -0,0 +1,945 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_volume
+short_description: NetApp E-Series manage storage volumes (standard and thin)
+description:
+ - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Whether the specified volume should exist
+ type: str
+ choices: ["present", "absent"]
+ default: "present"
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+ storage_pool_name:
+ description:
+ - Required only when I(state=="present").
+ - Name of the storage pool wherein the volume should reside.
+ type: str
+ required: false
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ - The pct unit defines a percentage of the storage pool's total usable size.
+ type: str
+ choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb", "pct"]
+ default: "gb"
+ size:
+ description:
+ - Required only when I(state=="present").
+ - Size of the volume in I(size_unit).
+ - Size of the virtual volume in the case of a thin volume in I(size_unit).
+ - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may exist.
+ type: float
+ required: true
+ size_tolerance_b:
+ description:
+ - Tolerance for total volume size measured in bytes; if the total volume size is within
+ +/- I(size_tolerance_b) then no resizing will be expected.
+ - This parameter can be useful in the case of existing volumes not created by na_santricity_volume
+ since providing the exact size can be difficult due to volume alignment and overhead.
+ type: int
+ required: false
+ default: 10485760
+ segment_size_kb:
+ description:
+ - Segment size of the volume.
+ - All values are in kibibytes.
+ - Some common choices include 8, 16, 32, 64, 128, 256, and 512 but the options are system
+ dependent.
+ - Retrieve the definitive system list from M(na_santricity_facts) under segment_sizes.
+ - When the storage pool is a raidDiskPool then the segment size must be 128kb.
+ - Segment size migrations are not allowed in this module.
+ type: int
+ default: 128
+ thin_provision:
+ description:
+ - Whether the volume should be thin provisioned.
+ - Thin volumes can only be created when I(raid_level=="raidDiskPool").
+ - Generally, use of thin-provisioning is not recommended due to performance impacts.
+ type: bool
+ default: false
+ required: false
+ thin_volume_repo_size:
+ description:
+ - This value (in size_unit) sets the allocated space for the thin provisioned repository.
+ - The initial value must be between 4gb and 256gb, inclusive, in increments of 4gb.
+ - During expansion operations the increase must be between 4gb and 256gb, inclusive, in increments of 4gb.
+ - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
+ - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+ type: int
+ required: false
+ thin_volume_max_repo_size:
+ description:
+ - This is the maximum amount the thin volume repository will be allowed to grow.
+ - Only has significance when I(thin_volume_expansion_policy=="automatic").
+ - When I(thin_volume_repo_size), expressed as a percentage of I(thin_volume_max_repo_size), exceeds
+ I(thin_volume_growth_alert_threshold) then a warning will be issued and the storage array will execute
+ the I(thin_volume_expansion_policy) policy.
+ - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
+ repository size.
+ - Default will be the same as I(size).
+ type: float
+ required: false
+ thin_volume_expansion_policy:
+ description:
+ - This is the thin volume expansion policy.
+ - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceeded, the
+ I(thin_volume_max_repo_size) will be automatically expanded.
+ - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded, the
+ storage system will wait for manual intervention.
+ - The thin volume expansion policy cannot be modified on existing thin volumes in this module.
+ - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+ type: str
+ choices: ["automatic", "manual"]
+ default: "automatic"
+ required: false
+ thin_volume_growth_alert_threshold:
+ description:
+ - This is the thin provision repository utilization threshold (in percent).
+ - When the percentage of used storage of the maximum repository size exceeds this value then an alert will
+ be issued and the I(thin_volume_expansion_policy) will be executed.
+ - Values must be between 10 and 99, inclusive.
+ type: int
+ default: 95
+ required: false
+ owning_controller:
+ description:
+ - Specifies which controller will be the primary owner of the volume
+ - Not specifying will allow the controller to choose ownership.
+ type: str
+ choices: ["A", "B"]
+ required: false
+ ssd_cache_enabled:
+ description:
+ - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
+ - The default value is to ignore existing SSD cache setting.
+ type: bool
+ default: false
+ required: false
+ data_assurance_enabled:
+ description:
+ - Determines whether data assurance (DA) should be enabled for the volume
+ - Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
+ type: bool
+ default: false
+ required: false
+ read_cache_enable:
+ description:
+ - Indicates whether read caching should be enabled for the volume.
+ type: bool
+ default: true
+ required: false
+ read_ahead_enable:
+ description:
+ - Indicates whether or not automatic cache read-ahead is enabled.
+ - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
+ benefit from read ahead caching.
+ type: bool
+ default: true
+ required: false
+ write_cache_enable:
+ description:
+ - Indicates whether write-back caching should be enabled for the volume.
+ type: bool
+ default: true
+ required: false
+ write_cache_mirror_enable:
+ description:
+ - Indicates whether write cache mirroring should be enabled.
+ type: bool
+ default: true
+ required: false
+ cache_without_batteries:
+ description:
+ - Indicates whether caching should be used without battery backup.
+ - Warning, when I(cache_without_batteries==true) and the storage system loses power with no battery backup, data will be lost!
+ type: bool
+ default: false
+ required: false
+ workload_name:
+ description:
+ - Label for the workload defined by the metadata.
+ - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
+ array.
+ - When I(workload_name) exists on the storage array but the metadata is different then the workload
+ definition will be updated. (Changes will update all associated volumes!)
+ - Existing workloads can be retrieved using M(na_santricity_facts).
+ type: str
+ required: false
+ workload_metadata:
+ description:
+ - Dictionary containing metadata for the use, user, location, etc. of the volume (the dictionary is arbitrarily
+ defined for whatever the user deems useful).
+ - When I(workload_name) exists on the storage array but the metadata is different then the workload
+ definition will be updated. (Changes will update all associated volumes!)
+ - I(workload_name) must be specified when I(metadata) is defined.
+ - Dictionary key cannot be longer than 16 characters
+ - Dictionary values cannot be longer than 60 characters
+ type: dict
+ required: false
+ aliases:
+ - metadata
+ volume_metadata:
+ description:
+ - Dictionary containing metadata for the volume itself.
+ - Dictionary key cannot be longer than 14 characters
+ - Dictionary values cannot be longer than 240 characters
+ type: dict
+ required: false
+ allow_expansion:
+ description:
+ - Allows volume size to expand to meet the required specification.
+ - Warning, when I(allow_expansion==false) and the existing volume needs to be expanded, the module will continue with a warning.
+ type: bool
+ default: false
+ required: false
+ wait_for_initialization:
+ description:
+ - Forces the module to wait for expansion operations to complete before continuing.
+ type: bool
+ default: false
+ required: false
+"""
+EXAMPLES = """
+- name: Create simple volume with workload tags (volume meta data)
+ na_santricity_volume:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ workload_name: volume_tag
+ metadata:
+ key1: value1
+ key2: value2
+
+- name: Create a thin volume
+ na_santricity_volume:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 131072
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+
+- name: Expand thin volume's virtual size
+ na_santricity_volume:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+
+- name: Expand thin volume's maximum repository size
+ na_santricity_volume:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 2048
+
+- name: Delete volume
+ na_santricity_volume:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+ name: volume
+"""
+RETURN = """
+msg:
+ description: State of volume
+ type: str
+ returned: always
+ sample: "Standard volume [workload_vol_1] has been created."
+"""
+
+import time
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesVolume(NetAppESeriesModule):
+ VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300
+ MAXIMUM_VOLUME_METADATA_KEY_LENGTH = 14
+ MAXIMUM_VOLUME_METADATA_VALUE_LENGTH = 240
+ MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH = 60
+
+ def __init__(self):
+ ansible_options = dict(
+ state=dict(choices=["present", "absent"], default="present"),
+ name=dict(required=True, type="str"),
+ storage_pool_name=dict(type="str"),
+ size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb", "pct"], type="str"),
+ size=dict(type="float"),
+ size_tolerance_b=dict(type="int", required=False, default=10485760),
+ segment_size_kb=dict(type="int", default=128, required=False),
+ owning_controller=dict(type="str", choices=["A", "B"], required=False),
+ ssd_cache_enabled=dict(type="bool", default=False),
+ data_assurance_enabled=dict(type="bool", default=False),
+ thin_provision=dict(type="bool", default=False),
+ thin_volume_repo_size=dict(type="int", required=False),
+ thin_volume_max_repo_size=dict(type="float", required=False),
+ thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"], default="automatic", required=False),
+ thin_volume_growth_alert_threshold=dict(type="int", default=95),
+ read_cache_enable=dict(type="bool", default=True),
+ read_ahead_enable=dict(type="bool", default=True),
+ write_cache_enable=dict(type="bool", default=True),
+ write_cache_mirror_enable=dict(type="bool", default=True),
+ cache_without_batteries=dict(type="bool", default=False),
+ workload_name=dict(type="str", required=False),
+ workload_metadata=dict(type="dict", required=False, aliases=["metadata"]),
+ volume_metadata=dict(type="dict", required=False),
+ allow_expansion=dict(type="bool", default=False),
+ wait_for_initialization=dict(type="bool", default=False))
+
+ required_if = [
+ ["state", "present", ["storage_pool_name", "size"]],
+ ["thin_provision", "true", ["thin_volume_repo_size"]]
+ ]
+
+ super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True,
+ required_if=required_if)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.name = args["name"]
+ self.storage_pool_name = args["storage_pool_name"]
+ self.size_unit = args["size_unit"]
+ self.size_tolerance_b = args["size_tolerance_b"]
+ self.segment_size_kb = args["segment_size_kb"]
+
+ if args["size"]:
+ if self.size_unit == "pct":
+ if args["thin_provision"]:
+ self.module.fail_json(msg="'pct' is an invalid size unit for thin provisioning! Array [%s]." % self.ssid)
+ self.size_percent = args["size"]
+ else:
+ self.size_b = self.convert_to_aligned_bytes(args["size"])
+
+ self.owning_controller_id = None
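+ # Map the requested controller to the fixed controller reference IDs expected by the web services API
+ # (controller A and controller B respectively).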
+ if args["owning_controller"]:
+ self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"
+
+ self.read_cache_enable = args["read_cache_enable"]
+ self.read_ahead_enable = args["read_ahead_enable"]
+ self.write_cache_enable = args["write_cache_enable"]
+ self.write_cache_mirror_enable = args["write_cache_mirror_enable"]
+ self.ssd_cache_enabled = args["ssd_cache_enabled"]
+ self.cache_without_batteries = args["cache_without_batteries"]
+ self.data_assurance_enabled = args["data_assurance_enabled"]
+
+ self.thin_provision = args["thin_provision"]
+ self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
+ self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
+ self.thin_volume_repo_size_b = None
+ self.thin_volume_max_repo_size_b = None
+
+ if args["thin_volume_repo_size"]:
+ self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
+ if args["thin_volume_max_repo_size"]:
+ self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
+
+ self.workload_name = args["workload_name"]
+ self.allow_expansion = args["allow_expansion"]
+ self.wait_for_initialization = args["wait_for_initialization"]
+
+ # convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
+ # each of the workload attributes dictionary entries
+ self.metadata = []
+ if self.state == "present" and args["workload_metadata"]:
+ if not self.workload_name:
+ self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified. Array [%s]." % self.ssid)
+
+ for key, value in args["workload_metadata"].items():
+ self.metadata.append({"key": key, "value": value})
+
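+ # Volume metadata values longer than MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH (60) characters are
+ # stored as multiple tags with indexed keys; for example, a 130-character value for key "notes" is split
+ # into "notes~0", "notes~1", and "notes~2".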
+ self.volume_metadata = []
+ if self.state == "present" and args["volume_metadata"]:
+ for key, value in args["volume_metadata"].items():
+ key, value = str(key), str(value)
+
+ if len(key) > self.MAXIMUM_VOLUME_METADATA_KEY_LENGTH:
+ self.module.fail_json(msg="Volume metadata keys must not exceed %s characters in length. Array [%s]."
+ % (str(self.MAXIMUM_VOLUME_METADATA_KEY_LENGTH), self.ssid))
+
+ if len(value) > self.MAXIMUM_VOLUME_METADATA_VALUE_LENGTH:
+ self.module.fail_json(msg="Volume metadata values must not exceed %s characters in length. Array [%s]."
+ % (str(self.MAXIMUM_VOLUME_METADATA_VALUE_LENGTH), self.ssid))
+
+ if value:
+ for index, start in enumerate(range(0, len(value), self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH)):
+ if len(value) > start + self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH:
+ self.volume_metadata.append({"key": "%s~%s" % (key, str(index)),
+ "value": value[start:start + self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH]})
+ else:
+ self.volume_metadata.append({"key": "%s~%s" % (key, str(index)), "value": value[start:len(value)]})
+ else:
+ self.volume_metadata.append({"key": "%s~0" % key, "value": ""})
+
+ if self.state == "present" and self.thin_provision:
+ if not self.thin_volume_max_repo_size_b:
+ self.thin_volume_max_repo_size_b = self.size_b
+
+ if not self.thin_volume_expansion_policy:
+ self.thin_volume_expansion_policy = "automatic"
+
+ if self.size_b > 256 * 1024 ** 4:
+ self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb in size."
+ " Attempted size [%sg]" % (self.size_b / 1024 ** 3))
+
+ if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
+ self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
+ self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
+ " repository size. Array [%s]." % self.ssid)
+
+ if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
+ self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between 10 and 99, inclusive."
+ " thin_volume_growth_alert_threshold [%s]. Array [%s]."
+ % (self.thin_volume_growth_alert_threshold, self.ssid))
+
+ self.volume_detail = None
+ self.pool_detail = None
+ self.workload_id = None
+
+ def convert_to_aligned_bytes(self, size):
+ """Convert size to the truncated byte size that aligns on the segment size."""
+ size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
+ segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
+ segment_count = int(size_bytes / segment_size_bytes)
+ return segment_count * segment_size_bytes
+
+ def get_volume(self):
+ """Retrieve volume details from storage array."""
+ volumes = list()
+ thin_volumes = list()
+ try:
+ rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+ try:
+ rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
+ return volume_detail[0] if volume_detail else dict()
+
+ def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
+ """Waits until volume becomes available.
+
+ :raises AnsibleFailJson when retries are exhausted.
+ """
+ if retries == 0:
+ self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
+ % (self.name, self.ssid))
+ if not self.get_volume():
+ time.sleep(5)
+ self.wait_for_volume_availability(retries=retries - 1)
+
+ def wait_for_volume_action(self, timeout=None):
+ """Waits until volume action is complete is complete.
+ :param: int timeout: Wait duration measured in seconds. Waits indefinitely when None.
+ """
+ action = "unknown"
+ percent_complete = None
+ while action != "complete":
+ time.sleep(5)
+
+ try:
+ rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)
+
+ # Search long lived operations for volume
+ action = "complete"
+ for operation in operations["longLivedOpsProgress"]:
+ if operation["volAction"] is not None:
+ for key in operation.keys():
+ if (operation[key] is not None and "volumeRef" in operation[key] and
+ (operation[key]["volumeRef"] == self.volume_detail["id"] or
+ ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
+ action = operation["volAction"]
+ percent_complete = operation["init"]["percentComplete"]
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(err)))
+
+ if timeout is not None:
+ if timeout <= 0:
+ self.module.warn("Expansion action, %s, failed to complete during the allotted time. Percent"
+ " complete [%s]. Array Id [%s]." % (action, percent_complete, self.ssid))
+ self.module.fail_json(msg="Expansion action failed to complete. Percent complete [%s]. Array Id [%s]." % (percent_complete, self.ssid))
+ if timeout:
+ timeout -= 5
+
+ self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete))
+ self.module.log("Expansion action is complete.")
+
+ def get_storage_pool(self):
+ """Retrieve storage pool details from the storage array."""
+ storage_pools = list()
+ try:
+ rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name]
+ return pool_detail[0] if pool_detail else dict()
+
+ def check_storage_pool_sufficiency(self):
+ """Perform a series of checks as to the sufficiency of the storage pool for the volume."""
+ if not self.pool_detail:
+ self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
+
+ if not self.volume_detail:
+ if self.thin_provision and not self.pool_detail['diskPool']:
+ self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.')
+
+ if (self.data_assurance_enabled and not
+ (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and
+ self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")):
+ self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible."
+ " Array [%s]." % self.ssid)
+
+ if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision:
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+ else:
+ # Check for expansion
+ if (int(self.pool_detail["freeSpace"]) < self.size_b - int(self.volume_detail["totalSizeInBytes"]) and
+ not self.thin_provision):
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+
+ def update_workload_tags(self, check_mode=False):
+ """Check the status of the workload tag and update storage array definitions if necessary.
+
+ When the workload attributes are not provided but an existing workload tag name is, then the existing
+ attributes will be used.
+
+ :return bool: Whether changes were required to be made."""
+ change_required = False
+ workload_tags = None
+ request_body = None
+ ansible_profile_id = None
+
+ if self.workload_name:
+ try:
+ rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ ansible_profile_id = "Other_1"
+ request_body = dict(name=self.workload_name,
+ profileId=ansible_profile_id,
+ workloadInstanceIndex=None,
+ isValid=True)
+
+ # evaluate and update storage array when needed
+ for tag in workload_tags:
+ if tag["name"] == self.workload_name:
+ self.workload_id = tag["id"]
+
+ if not self.metadata:
+ break
+
+ # Determine if core attributes (everything but profileId) are the same
+ metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata)
+ tag_set = set(tuple(sorted(attr.items()))
+ for attr in tag["workloadAttributes"] if attr["key"] != "profileId")
+ if metadata_set != tag_set:
+ self.module.log("Workload tag change is required!")
+ change_required = True
+
+ # only perform the required action when check_mode==False
+ if change_required and not check_mode:
+ self.metadata.append(dict(key="profileId", value=ansible_profile_id))
+ request_body.update(dict(isNewWorkloadInstance=False,
+ isWorkloadDataInitialized=True,
+ isWorkloadCardDataToBeReset=True,
+ workloadAttributes=self.metadata))
+ try:
+ rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]),
+ data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update workload tag. Array [%s]. Error [%s]"
+ % (self.ssid, to_native(error)))
+ self.module.log("Workload tag [%s] required change." % self.workload_name)
+ break
+
+ # existing workload tag not found so create new workload tag
+ else:
+ change_required = True
+ self.module.log("Workload tag creation is required!")
+
+ if change_required and not check_mode:
+ if self.metadata:
+ self.metadata.append(dict(key="profileId", value=ansible_profile_id))
+ else:
+ self.metadata = [dict(key="profileId", value=ansible_profile_id)]
+
+ request_body.update(dict(isNewWorkloadInstance=True,
+ isWorkloadDataInitialized=False,
+ isWorkloadCardDataToBeReset=False,
+ workloadAttributes=self.metadata))
+ try:
+ rc, resp = self.request("storage-systems/%s/workloads" % self.ssid,
+ method="POST", data=request_body)
+ self.workload_id = resp["id"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
+ % (self.ssid, to_native(error)))
+ self.module.log("Workload tag [%s] was added." % self.workload_name)
+
+ return change_required
+
+ def get_volume_property_changes(self):
+ """Retrieve the volume update request body when change(s) are required.
+
+ :raise AnsibleFailJson when attempting to change segment size on existing volume.
+ :return dict: request body when change(s) to a volume's properties are required.
+ """
+ change = False
+ request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[],
+ cacheSettings=dict(readCacheEnable=self.read_cache_enable,
+ writeCacheEnable=self.write_cache_enable,
+ mirrorEnable=self.write_cache_mirror_enable))
+
+ # check for invalid modifications
+ if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]):
+ self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified."
+ % self.volume_detail["segmentSize"])
+
+ # common thick/thin volume properties
+ if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or
+ self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or
+ self.write_cache_mirror_enable != self.volume_detail["cacheSettings"]["mirrorEnable"] or
+ self.ssd_cache_enabled != self.volume_detail["flashCached"]):
+ change = True
+
+ # controller ownership
+ if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]:
+ change = True
+ request_body.update(dict(owningControllerId=self.owning_controller_id))
+
+ # volume meta tags
+ request_body["metaTags"].extend(self.volume_metadata)
+ for entry in self.volume_metadata:
+ if entry not in self.volume_detail["metadata"]:
+ change = True
+
+ if self.workload_name:
+ request_body["metaTags"].extend([{"key": "workloadId", "value": self.workload_id},
+ {"key": "volumeTypeId", "value": "volume"}])
+
+ if ({"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"] or
+ {"key": "volumeTypeId", "value": "volume"} not in self.volume_detail["metadata"]):
+ change = True
+
+ if len(self.volume_detail["metadata"]) != len(request_body["metaTags"]):
+ change = True
+
+ # thick/thin volume specific properties
+ if self.thin_provision:
+ if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]):
+ change = True
+ request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+ if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]:
+ change = True
+ request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy))
+ else:
+ if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0):
+ change = True
+ request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable))
+ if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]:
+ change = True
+ request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries))
+
+ return request_body if change else dict()
+
+ def get_expand_volume_changes(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ :return dict: dictionary containing all the necessary values for volume expansion request
+ """
+ request_body = dict()
+
+ if self.size_b < int(self.volume_detail["capacity"]) - self.size_tolerance_b:
+ self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. Array [%s]"
+ % (self.name, self.ssid))
+
+ if self.volume_detail["thinProvisioned"]:
+ if self.size_b > int(self.volume_detail["capacity"]) + self.size_tolerance_b:
+ request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
+ self.module.log("Thin volume virtual size have been expanded.")
+
+ if self.volume_detail["expansionPolicy"] == "automatic":
+ if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]) + self.size_tolerance_b:
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (automatic policy).")
+
+ elif self.volume_detail["expansionPolicy"] == "manual":
+ if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]) + self.size_tolerance_b:
+ change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
+ if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
+ self.module.fail_json(msg="The thin volume repository increase must be between 4gb and 256gb,"
+ " inclusive, in increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b / 1024 ** 3))
+
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (manual policy).")
+
+ elif self.size_b > int(self.volume_detail["capacity"]) + self.size_tolerance_b:
+ request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
+ self.module.log("Volume storage capacities have been expanded.")
+
+ if request_body and not self.allow_expansion:
+ self.module.warn("Expansion not allowed! Change allow_expansion flag to true to allow volume expansions. Array Id [%s]." % self.ssid)
+ return dict()
+
+ return request_body
+
+ def create_volume(self):
+ """Create thick/thin volume according to the specified criteria."""
+ body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
+ dataAssuranceEnabled=self.data_assurance_enabled)
+
+ if self.volume_metadata:
+ body.update({"metaTags": self.volume_metadata})
+
+ if self.thin_provision:
+ body.update(dict(virtualSize=self.size_b,
+ repositorySize=self.thin_volume_repo_size_b,
+ maximumRepositorySize=self.thin_volume_max_repo_size_b,
+ expansionPolicy=self.thin_volume_expansion_policy,
+ growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+ try:
+ rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New thin volume created [%s]." % self.name)
+
+ else:
+ body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
+ try:
+ rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New volume created [%s]." % self.name)
+
+ def update_volume_properties(self):
+ """Update existing thin-volume or volume properties.
+
+ :raise AnsibleFailJson when either thick/thin volume update request fails.
+ :return bool: whether update was applied
+ """
+ self.wait_for_volume_availability()
+ self.volume_detail = self.get_volume()
+
+ request_body = self.get_volume_property_changes()
+
+ if request_body:
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ return True
+ return False
+
+ def expand_volume(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ """
+ request_body = self.get_expand_volume_changes()
+ if request_body:
+ if self.volume_detail["thinProvisioned"]:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+ self.module.log("Thin volume specifications have been expanded.")
+
+ else:
+ try:
+ rc, resp = self.request(
+ "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
+ data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+
+ self.module.log("Volume storage capacities have been expanded.")
+
+ def delete_volume(self):
+ """Delete existing thin/thick volume."""
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Thin volume deleted [%s]." % self.name)
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Volume deleted [%s]." % self.name)
+
+ def apply(self):
+ """Determine and apply any changes necessary to satisfy the specified criteria.
+
+ :raise AnsibleExitJson when completes successfully"""
+ change = False
+ msg = None
+
+ self.volume_detail = self.get_volume()
+ self.pool_detail = self.get_storage_pool()
+
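+ # When size_unit is "pct", derive the volume size from the storage pool: round the pool's total RAIDed
+ # space to the nearest 10^8 bytes, convert to megabytes, subtract 100 MB of headroom, and then apply
+ # the requested percentage.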
+ if self.pool_detail and self.size_unit == "pct":
+ space_mb = round(float(self.pool_detail["totalRaidedSpace"]), -8) / 1024 ** 2 - 100
+ self.size_unit = "mb"
+ self.size_b = self.convert_to_aligned_bytes(space_mb * (self.size_percent / 100))
+
+ # Determine whether changes need to be applied to existing workload tags
+ if self.state == 'present' and self.update_workload_tags(check_mode=True):
+ change = True
+
+ # Determine if any changes need to be applied
+ if self.volume_detail:
+ if self.state == 'absent':
+ change = True
+
+ elif self.state == 'present':
+ # Check the property changes first; this ensures the segment size has not changed before the
+ # size is used to determine whether a volume expansion is needed, which would otherwise surface
+ # an irrelevant error message.
+ if self.get_volume_property_changes() or self.get_expand_volume_changes():
+ change = True
+
+ elif self.state == 'present':
+ if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
+ self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
+ self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
+ self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
+ " increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b / 1024 ** 3))
+ change = True
+
+ self.module.log("Update required: [%s]." % change)
+
+ # Apply any necessary changes
+ if change and not self.module.check_mode:
+ if self.state == 'present':
+ if self.update_workload_tags():
+ msg = "Workload tag change occurred."
+
+ if not self.volume_detail:
+ self.check_storage_pool_sufficiency()
+ self.create_volume()
+ self.update_volume_properties()
+ msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
+ else:
+ if self.update_volume_properties():
+ msg = "Volume [%s] properties were updated."
+
+ if self.get_expand_volume_changes():
+ self.expand_volume()
+ msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."
+
+ if self.wait_for_initialization:
+ self.module.log("Waiting for volume operation to complete.")
+ self.wait_for_volume_action()
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ msg = "Volume [%s] has been deleted."
+
+ else:
+ msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
+
+ self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
+
+
+def main():
+ volume = NetAppESeriesVolume()
+ volume.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py
new file mode 100644
index 000000000..20c4dc57e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_alerts
+short_description: NetApp E-Series manage email notification settings
+description:
+ - Certain E-Series systems have the capability to send email notifications on potentially critical events.
+ - This module will allow the owner of the system to specify email recipients for these messages.
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable/disable the sending of email-based alerts.
+ default: enabled
+ required: false
+ type: str
+ choices:
+ - enabled
+ - disabled
+ server:
+ description:
+ - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
+ - To use a fully qualified domain name, you must configure a DNS server on both controllers using
+ M(netapp_e_mgmt_interface).
+ - Required when I(state=enabled).
+ type: str
+ required: no
+ sender:
+ description:
+ - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
+ - Required when I(state=enabled).
+ type: str
+ required: no
+ contact:
+ description:
+ - Allows the owner to specify some free-form contact information to be included in the emails.
+ - This is typically utilized to provide a contact phone number.
+ type: str
+ required: no
+ recipients:
+ description:
+ - The email addresses that will receive the email notifications.
+ - Required when I(state=enabled).
+ type: list
+ required: no
+ test:
+ description:
+ - When a change is detected in the configuration, a test email will be sent.
+ - This may take a few minutes to process.
+ - Only applicable if I(state=enabled).
+ default: no
+ type: bool
+ log_path:
+ description:
+ - Path to a file on the Ansible control node to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - Alertable messages are a subset of messages shown by the Major Event Log (MEL) of the storage system. Examples
+ of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
+ events.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable email-based alerting
+ netapp_e_alerts:
+ state: enabled
+ sender: noreply@example.com
+ server: mail.example.com
+ contact: "Phone: 1-555-555-5555"
+ recipients:
+ - name1@example.com
+ - name2@example.com
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable alerting
+ netapp_e_alerts:
+ state: disabled
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+
+import json
+import logging
+from pprint import pformat
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class Alerts(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', required=False, default='enabled',
+ choices=['enabled', 'disabled']),
+ server=dict(type='str', required=False, ),
+ sender=dict(type='str', required=False, ),
+ contact=dict(type='str', required=False, ),
+ recipients=dict(type='list', required=False, ),
+ test=dict(type='bool', required=False, default=False, ),
+ log_path=dict(type='str', required=False),
+ ))
+
+ required_if = [
+ ['state', 'enabled', ['server', 'sender', 'recipients']]
+ ]
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+ args = self.module.params
+ self.alerts = args['state'] == 'enabled'
+ self.server = args['server']
+ self.sender = args['sender']
+ self.contact = args['contact']
+ self.recipients = args['recipients']
+ self.test = args['test']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ self.check_mode = self.module.check_mode
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ # Very basic validation on email addresses: xx@yy.zz
+ email = re.compile(r"[^@]+@[^@]+\.[^@]+")
+
+ if self.sender and not email.match(self.sender):
+ self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)
+
+ if self.recipients is not None:
+ for recipient in self.recipients:
+ if not email.match(recipient):
+ self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)
+
+ if len(self.recipients) < 1:
+ self.module.fail_json(msg="At least one recipient address must be specified.")
+
+ def get_configuration(self):
+ try:
+ (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS,
+ **self.creds)
+ self._logger.info("Current config: %s", pformat(result))
+ return result
+
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update_configuration(self):
+ config = self.get_configuration()
+ update = False
+ body = dict()
+
+ if self.alerts:
+ body = dict(alertingEnabled=True)
+ if not config['alertingEnabled']:
+ update = True
+
+ body.update(emailServerAddress=self.server)
+ if config['emailServerAddress'] != self.server:
+ update = True
+
+ body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
+ if self.contact and (self.contact != config['additionalContactInformation']
+ or not config['sendAdditionalContactInformation']):
+ update = True
+
+ body.update(emailSenderAddress=self.sender)
+ if config['emailSenderAddress'] != self.sender:
+ update = True
+
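+ # Sort both recipient lists so the comparison below is order-insensitive.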
+ self.recipients.sort()
+ if config['recipientEmailAddresses']:
+ config['recipientEmailAddresses'].sort()
+
+ body.update(recipientEmailAddresses=self.recipients)
+ if config['recipientEmailAddresses'] != self.recipients:
+ update = True
+
+ elif config['alertingEnabled']:
+ body = dict(alertingEnabled=False)
+ update = True
+
+ self._logger.debug(pformat(body))
+
+ if update and not self.check_mode:
+ try:
+ (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST',
+ data=json.dumps(body), headers=HEADERS, **self.creds)
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update the alerts configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+ return update
+
+ def send_test_email(self):
+ """Send a test email to verify that the provided configuration is valid and functional."""
+ if not self.check_mode:
+ try:
+ (rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid,
+ timeout=300, method='POST', headers=HEADERS, **self.creds)
+
+ if result['response'] != 'emailSentOK':
+ self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]."
+ % (result['response'], self.ssid))
+
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update(self):
+ update = self.update_configuration()
+
+ if self.test and update:
+ self._logger.info("An update was detected and test=True, running a test.")
+ self.send_test_email()
+
+ if self.alerts:
+ msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
+ else:
+ msg = 'Alerting has been disabled.'
+
+ self.module.exit_json(msg=msg, changed=update, )
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ alerts = Alerts()
+ alerts()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py
new file mode 100644
index 000000000..e2bfa4193
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg
+short_description: NetApp E-Series create, remove, and update asynchronous mirror groups
+description:
+ - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - The name of the async array you wish to target, or create.
+ - If C(state) is present and the name is not found, the module will attempt to create it.
+ type: str
+ required: yes
+ new_name:
+ description:
+ - New async array name
+ type: str
+ required: no
+ secondaryArrayId:
+ description:
+ - The ID of the secondary array to be used in mirroring process
+ type: str
+ required: yes
+ syncIntervalMinutes:
+ description:
+ - The synchronization interval in minutes
+ type: int
+ default: 10
+ manualSync:
+ description:
+ - Setting this to true will cause other synchronization values to be ignored
+ type: bool
+ default: 'no'
+ recoveryWarnThresholdMinutes:
+ description:
+ - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
+ type: int
+ default: 20
+ repoUtilizationWarnThreshold:
+ description:
+ - Repository utilization warning threshold (percent)
+ type: int
+ default: 80
+ interfaceType:
+ description:
+ - The intended protocol to use if both Fibre and iSCSI are available.
+ type: str
+ choices:
+ - iscsi
+ - fibre
+ syncWarnThresholdMinutes:
+ description:
+ - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
+ default: 10
+ type: int
+ state:
+ description:
+ - A C(state) of present will either create or update the async mirror group.
+ - A C(state) of absent will remove the async mirror group.
+ type: str
+ choices: [ absent, present ]
+ required: yes
+"""
+
+EXAMPLES = """
+ - name: AMG removal
+ netapp_e_amg:
+ state: absent
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: AMG create
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+"""
+
+RETURN = """
+msg:
+ description: Successful creation
+ returned: success
+ type: str
+ sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
+""" # NOQA
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def has_match(module, ssid, api_url, api_pwd, api_usr, body):
+ compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+ 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+ desired_state = dict((x, (body.get(x))) for x in compare_keys)
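+ # Only the tunable synchronization and threshold settings are compared; other request fields do not
+ # affect whether an existing group matches the desired specification.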
+ label_exists = False
+ matches_spec = False
+ current_state = None
+ async_id = None
+ api_data = None
+ desired_name = body.get('name')
+ endpoint = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + endpoint
+ try:
+ rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
+ except Exception as e:
+ module.exit_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc())
+
+ for async_group in data:
+ if async_group['label'] == desired_name:
+ label_exists = True
+ api_data = async_group
+ async_id = async_group['groupRef']
+ current_state = dict(
+ syncIntervalMinutes=async_group['syncIntervalMinutes'],
+ syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
+ recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
+ repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
+ )
+
+ if current_state == desired_state:
+ matches_spec = True
+
+ return label_exists, matches_spec, api_data, async_id
+
+
+def create_async(module, ssid, api_url, api_pwd, api_usr, body):
+ endpoint = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + endpoint
+ post_data = json.dumps(body)
+ try:
+ rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
+ headers=HEADERS)
+ except Exception as e:
+ module.exit_json(msg="Exception while creating async mirror group. Message: %s" % to_native(e),
+ exception=traceback.format_exc())
+ return data
+
+
+def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+ url = api_url + endpoint
+ compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+ 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+ desired_state = dict((x, (body.get(x))) for x in compare_keys)
+
+ if new_name:
+ desired_state['new_name'] = new_name
+
+ post_data = json.dumps(desired_state)
+
+ try:
+ rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
+ url_username=user, url_password=pwd)
+ except Exception as e:
+ module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ return data
+
+
+def remove_amg(module, ssid, api_url, pwd, user, async_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+ url = api_url + endpoint
+ try:
+ rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
+ headers=HEADERS)
+ except Exception as e:
+ module.exit_json(msg="Exception while removing async mirror group. Message: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ return
+
+
+def main():
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ new_name=dict(required=False, type='str'),
+ secondaryArrayId=dict(required=True, type='str'),
+ syncIntervalMinutes=dict(required=False, default=10, type='int'),
+ manualSync=dict(required=False, default=False, type='bool'),
+ recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
+ repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
+ interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ new_name = p.pop('new_name')
+ state = p.pop('state')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
+
+ if state == 'present':
+ if name_exists and spec_matches:
+ module.exit_json(changed=False, msg="Desired state met", **api_data)
+ elif name_exists and not spec_matches:
+ results = update_async(module, ssid, api_url, pwd, user,
+ p, new_name, async_id)
+ module.exit_json(changed=True,
+ msg="Async mirror group updated", async_id=async_id,
+ **results)
+ elif not name_exists:
+ results = create_async(module, ssid, api_url, pwd, user, p)
+ module.exit_json(changed=True, **results)
+
+ elif state == 'absent':
+ if name_exists:
+ remove_amg(module, ssid, api_url, pwd, user, async_id)
+ module.exit_json(changed=True, msg="Async mirror group removed.",
+ async_id=async_id)
+ else:
+ module.exit_json(changed=False,
+ msg="Async Mirror group: %s already absent" % p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py
new file mode 100644
index 000000000..a67506f3f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg_role
+short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG).
+description:
+ - Update a storage array to become the primary or secondary instance in an asynchronous mirror group
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ description:
+ - The ID of the primary storage array for the async mirror action
+ required: yes
+ type: str
+ name:
+ description:
+ - Name of the role
+ required: yes
+ type: str
+ role:
+ description:
+ - Whether the array should be the primary or secondary array for the AMG
+ required: yes
+ type: str
+ choices: ['primary', 'secondary']
+ noSync:
+ description:
+ - Whether to avoid synchronization prior to role reversal
+ required: no
+ default: no
+ type: bool
+ force:
+ description:
+ - Whether to force the role reversal regardless of the online-state of the primary
+ required: no
+ default: no
+ type: bool
+"""
+
+EXAMPLES = """
+ - name: Update the role of a storage array
+ netapp_e_amg_role:
+ name: updating amg role
+ role: primary
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+"""
+
+RETURN = """
+msg:
+ description: Failure message
+ returned: failure
+ type: str
+ sample: "No Async Mirror Group with the name."
+"""
+import json
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as e:
+ r = e.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
+ amg_exists = False
+ has_desired_role = False
+ amg_id = None
+ amg_data = None
+ get_amgs = 'storage-systems/%s/async-mirrors' % ssid
+ url = api_url + get_amgs
+ try:
+ amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
+ headers=HEADERS)
+ except Exception:
+ module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
+
+ for amg in amgs:
+ if amg['label'] == name:
+ amg_exists = True
+ amg_id = amg['id']
+ amg_data = amg
+ if amg['localRole'] == body.get('role'):
+ has_desired_role = True
+
+ return amg_exists, has_desired_role, amg_id, amg_data
+
+
+def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
+ endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
+ url = api_url + endpoint
+ post_data = json.dumps(body)
+ try:
+ request(url, data=post_data, method='POST', url_username=api_usr,
+ url_password=api_pwd, headers=HEADERS)
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
+ status_url = api_url + status_endpoint
+ try:
+ rc, status = request(status_url, method='GET', url_username=api_usr,
+ url_password=api_pwd, headers=HEADERS)
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to check status of AMG after role reversal. "
+ "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ # Here we wait for the role reversal to complete
+ if 'roleChangeProgress' in status:
+ while status['roleChangeProgress'] != "none":
+ try:
+ rc, status = request(status_url, method='GET',
+ url_username=api_usr, url_password=api_pwd, headers=HEADERS)
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to check status of AMG after role reversal. "
+ "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
+ exception=traceback.format_exc())
+ return status
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True, type='str'),
+ role=dict(required=True, choices=['primary', 'secondary']),
+ noSync=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ name = p.pop('name')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
+
+ if not amg_exists:
+ module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
+ elif has_desired_role:
+ module.exit_json(changed=False, **amg_data)
+
+ else:
+ amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
+ if amg_data:
+ module.exit_json(changed=True, **amg_data)
+ else:
+ module.exit_json(changed=True, msg="AMG role changed.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py
new file mode 100644
index 000000000..056accd6b
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg_sync
+short_description: NetApp E-Series conduct synchronization actions on asynchronous mirror groups.
+description:
+ - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ description:
+ - The ID of the storage array containing the AMG you wish to target
+ type: str
+ name:
+ description:
+ - The name of the async mirror group you wish to target
+ type: str
+ required: yes
+ state:
+ description:
+ - The synchronization action you'd like to take.
+ - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in
+ progress, it will return with an OK status.
+ - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
+ type: str
+ choices:
+ - running
+ - suspended
+ required: yes
+ delete_recovery_point:
+ description:
+ - Indicates whether the recovery point can be deleted on the secondary if necessary to achieve the synchronization.
+ - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last
+ recovery point will be deleted and synchronization will continue.
+ - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary
+ and the recovery point will be preserved.
+ - "NOTE: This only has impact for newly launched syncs."
+ type: bool
+ default: no
+"""
+EXAMPLES = """
+ - name: start AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_sync_name }}"
+ state: running
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+"""
+RETURN = """
+json:
+ description: The object attributes of the AMG.
+ returned: success
+ type: str
+ example:
+ {
+ "changed": false,
+ "connectionType": "fc",
+ "groupRef": "3700000060080E5000299C24000006EF57ACAC70",
+ "groupState": "optimal",
+ "id": "3700000060080E5000299C24000006EF57ACAC70",
+ "label": "made_with_ansible",
+ "localRole": "primary",
+ "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
+ "orphanGroup": false,
+ "recoveryPointAgeAlertThresholdMinutes": 20,
+ "remoteRole": "secondary",
+ "remoteTarget": {
+ "nodeName": {
+ "ioInterfaceType": "fc",
+ "iscsiNodeName": null,
+ "remoteNodeWWN": "20040080E5299F1C"
+ },
+ "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
+ "scsiinitiatorTargetBaseProperties": {
+ "ioInterfaceType": "fc",
+ "iscsiinitiatorTargetBaseParameters": null
+ }
+ },
+ "remoteTargetId": "ansible2",
+ "remoteTargetName": "Ansible2",
+ "remoteTargetWwn": "60080E5000299F880000000056A25D56",
+ "repositoryUtilizationWarnThreshold": 80,
+ "roleChangeProgress": "none",
+ "syncActivity": "idle",
+ "syncCompletionTimeAlertThresholdMinutes": 10,
+ "syncIntervalMinutes": 10,
+ "worldWideName": "60080E5000299C24000006EF57ACAC70"
+ }
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.urls import open_url
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as e:
+ r = e.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
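+# A minimal illustration of how the request() helper above is typically consumed;
+# the endpoint, credentials and array id are hypothetical placeholders.
+#
+#   rc, amgs = request("https://192.168.1.100:8443/devmgr/v2/storage-systems/1/async-mirrors",
+#                      url_username="admin", url_password="adminpass", validate_certs=False,
+#                      headers={"Accept": "application/json", "Content-Type": "application/json"})
+#   if rc == 200:
+#       labels = [amg["label"] for amg in amgs]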
+
+class AMGsync(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ name=dict(required=True, type='str'),
+ ssid=dict(required=True, type='str'),
+ state=dict(required=True, type='str', choices=['running', 'suspended']),
+ delete_recovery_point=dict(required=False, type='bool', default=False)
+ ))
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.name = args['name']
+ self.ssid = args['ssid']
+ self.state = args['state']
+ self.delete_recovery_point = args['delete_recovery_point']
+ try:
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.url = args['api_url']
+ except KeyError:
+ self.module.fail_json(msg="You must pass in api_username"
+ "and api_password and api_url to the module.")
+ self.certs = args['validate_certs']
+
+ self.post_headers = {
+ "Accept": "application/json",
+ "Content-Type": "application/json"
+ }
+ self.amg_id, self.amg_obj = self.get_amg()
+
+ def get_amg(self):
+ endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
+ (rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ headers=self.post_headers)
+ try:
+            matches = [amg for amg in amg_objs if amg['label'] == self.name]
+            amg_id = matches[0]['id']
+            amg_obj = matches[0]
+ except IndexError:
+ self.module.fail_json(
+ msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
+ return amg_id, amg_obj
+
+ @property
+ def current_state(self):
+ amg_id, amg_obj = self.get_amg()
+ return amg_obj['syncActivity']
+
+ def run_sync_action(self):
+ # If we get to this point we know that the states differ, and there is no 'err' state,
+ # so no need to revalidate
+
+ post_body = dict()
+ if self.state == 'running':
+ if self.current_state == 'idle':
+ if self.delete_recovery_point:
+ post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
+ suffix = 'sync'
+ else:
+ # In a suspended state
+ suffix = 'resume'
+ else:
+ suffix = 'suspend'
+
+ endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)
+
+ (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
+ ignore_errors=True)
+
+ if not str(rc).startswith('2'):
+ self.module.fail_json(msg=str(resp['errorMessage']))
+
+ return resp
+
+ def apply(self):
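+        # state_map translates the requested module state into the set of API
+        # syncActivity values that already satisfy it; anything in the 'err'
+        # bucket indicates a condition that requires manual intervention.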
+ state_map = dict(
+ running=['active'],
+ suspended=['userSuspended', 'internallySuspended', 'paused'],
+            err=['unknown', '_UNDEFINED'])
+
+ if self.current_state not in state_map[self.state]:
+ if self.current_state in state_map['err']:
+                self.module.fail_json(
+                    msg="The sync is in a state of '%s', which requires manual intervention. "
+                        "Please investigate and try again." % self.current_state)
+ else:
+ self.amg_obj = self.run_sync_action()
+
+ (ret, amg) = self.get_amg()
+ self.module.exit_json(changed=False, **amg)
+
+
+def main():
+ sync = AMGsync()
+ sync.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py
new file mode 100644
index 000000000..f039626af
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_asup
+short_description: NetApp E-Series manage auto-support settings
+description:
+ - Allow the auto-support settings to be configured for an individual E-Series storage-system
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable/disable the E-Series auto-support configuration.
+ - When this option is enabled, configuration, logs, and other support-related information will be relayed
+ to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
+ be collected.
+ default: enabled
+ type: str
+ choices:
+ - enabled
+ - disabled
+ aliases:
+ - asup
+ - auto_support
+ - autosupport
+ active:
+ description:
+ - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
+ possible that the bundle did not contain all of the required information at the time of the event.
+ Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
+        of support data in order to resolve the problem.
+ - Only applicable if I(state=enabled).
+ default: yes
+ type: bool
+ start:
+ description:
+ - A start hour may be specified in a range from 0 to 23 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ aliases:
+ - start_time
+ default: 0
+ type: int
+ end:
+ description:
+ - An end hour may be specified in a range from 1 to 24 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ aliases:
+ - end_time
+ default: 24
+ type: int
+ days:
+ description:
+ - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
+ of the provided days.
+ choices:
+ - monday
+ - tuesday
+ - wednesday
+ - thursday
+ - friday
+ - saturday
+ - sunday
+ required: no
+ type: list
+ aliases:
+ - days_of_week
+ - schedule_days
+ verbose:
+ description:
+ - Provide the full ASUP configuration in the return.
+ default: no
+ required: no
+ type: bool
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
+ respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
+ disabled if desired.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable ASUP and allow pro-active retrieval of bundles
+ netapp_e_asup:
+ state: enabled
+ active: yes
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+    - name: Set the ASUP schedule to only send bundles between 17:00 and 20:00 UTC (11 AM to 2 PM CST).
+ netapp_e_asup:
+ start: 17
+ end: 20
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+asup:
+ description:
+ - True if ASUP is enabled.
+ returned: on success
+ sample: True
+ type: bool
+active:
+ description:
+ - True if the active option has been enabled.
+ returned: on success
+ sample: True
+ type: bool
+cfg:
+ description:
+ - Provide the full ASUP configuration.
+ returned: on success when I(verbose=true).
+ type: complex
+ contains:
+ asupEnabled:
+ description:
+ - True if ASUP has been enabled.
+ type: bool
+ onDemandEnabled:
+ description:
+ - True if ASUP active monitoring has been enabled.
+ type: bool
+ daysOfWeek:
+ description:
+ - The days of the week that ASUP bundles will be sent.
+ type: list
+"""
+
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class Asup(object):
+ DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
+
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
+ choices=['enabled', 'disabled']),
+ active=dict(type='bool', required=False, default=True, ),
+ days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
+ choices=self.DAYS_OPTIONS),
+ start=dict(type='int', required=False, default=0, aliases=['start_time']),
+ end=dict(type='int', required=False, default=24, aliases=['end_time']),
+ verbose=dict(type='bool', required=False, default=False),
+ log_path=dict(type='str', required=False),
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
+ args = self.module.params
+ self.asup = args['state'] == 'enabled'
+ self.active = args['active']
+ self.days = args['days']
+ self.start = args['start']
+ self.end = args['end']
+ self.verbose = args['verbose']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ self.check_mode = self.module.check_mode
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
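+        # start/end are supplied as hours; the schedule fields sent to the API are
+        # expressed in minutes past midnight, hence the conversion below (for
+        # example, start=17 becomes 1020 and end=24 becomes min(1440, 1439) = 1439).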
+ if self.start >= self.end:
+ self.module.fail_json(msg="The value provided for the start time is invalid."
+ " It must be less than the end time.")
+ if self.start < 0 or self.start > 23:
+ self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
+ else:
+ self.start = self.start * 60
+ if self.end < 1 or self.end > 24:
+ self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
+ else:
+ self.end = min(self.end * 60, 1439)
+
+ if not self.days:
+ self.days = self.DAYS_OPTIONS
+
+ def get_configuration(self):
+ try:
+ (rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
+
+ if not (result['asupCapable'] and result['onDemandCapable']):
+ self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
+ return result
+
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update_configuration(self):
+ config = self.get_configuration()
+ update = False
+ body = dict()
+
+ if self.asup:
+ body = dict(asupEnabled=True)
+ if not config['asupEnabled']:
+ update = True
+
+ if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
+ update = True
+ body.update(dict(onDemandEnabled=self.active,
+ remoteDiagsEnabled=self.active))
+ self.days.sort()
+ config['schedule']['daysOfWeek'].sort()
+
+ body['schedule'] = dict(daysOfWeek=self.days,
+ dailyMinTime=self.start,
+ dailyMaxTime=self.end,
+ weeklyMinTime=self.start,
+ weeklyMaxTime=self.end)
+
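+            # The schedule values above (dailyMin/MaxTime, weeklyMin/MaxTime) are the
+            # minutes-past-midnight values computed from start/end in __init__.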
+ if self.days != config['schedule']['daysOfWeek']:
+ update = True
+ if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
+ update = True
+ elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
+ update = True
+
+ elif config['asupEnabled']:
+ body = dict(asupEnabled=False)
+ update = True
+
+ self._logger.info(pformat(body))
+
+ if update and not self.check_mode:
+ try:
+ (rc, result) = request(self.url + 'device-asup', method='POST',
+ data=json.dumps(body), headers=HEADERS, **self.creds)
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ return update
+
+ def update(self):
+ update = self.update_configuration()
+ cfg = self.get_configuration()
+ if self.verbose:
+ self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
+ asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
+ else:
+ self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
+ asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ settings = Asup()
+ settings()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py
new file mode 100644
index 000000000..814a72d34
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_auditlog
+short_description: NetApp E-Series manage audit-log configuration
+description:
+ - This module allows an e-series storage system owner to set audit-log configuration parameters.
+version_added: '2.7'
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ max_records:
+ description:
+      - The maximum number of log messages audit-log will retain.
+ - Max records must be between and including 100 and 50000.
+ default: 50000
+ type: int
+ log_level:
+ description: Filters the log messages according to the specified log level selection.
+ choices:
+ - all
+ - writeOnly
+ default: writeOnly
+ type: str
+ full_policy:
+    description: Specifies what audit-log should do once the number of entries approaches the record limit.
+ choices:
+ - overWrite
+ - preventSystemAccess
+ default: overWrite
+ type: str
+ threshold:
+ description:
+      - The percent threshold of audit-log fullness at which audit-log will start issuing warning messages.
+ - Percent range must be between and including 60 and 90.
+ default: 90
+ type: int
+ force:
+ description:
+      - Forces the audit-log configuration to delete log history when log message fullness causes an immediate
+        warning or full condition.
+ - Warning! This will cause any existing audit-log messages to be deleted.
+ - This is only applicable for I(full_policy=preventSystemAccess).
+ type: bool
+ default: no
+ log_path:
+ description: A local path to a file to be used for debug logging.
+ required: no
+ type: str
+notes:
+ - Check mode is supported.
+ - This module is currently only supported with the Embedded Web Services API v3.0 and higher.
+"""
+
+EXAMPLES = """
+- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity.
+ netapp_e_auditlog:
+ api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
+ api_username: "{{ netapp_e_api_username }}"
+ api_password: "{{ netapp_e_api_password }}"
+ ssid: "{{ netapp_e_ssid }}"
+ validate_certs: no
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ log_path: /path/to/log_file.log
+- name: Define audit-log to utilize the default values.
+ netapp_e_auditlog:
+ api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
+ api_username: "{{ netapp_e_api_username }}"
+ api_password: "{{ netapp_e_api_password }}"
+ ssid: "{{ netapp_e_ssid }}"
+- name: Force audit-log configuration when full or warning conditions occur while enacting preventSystemAccess policy.
+ netapp_e_auditlog:
+ api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
+ api_username: "{{ netapp_e_api_username }}"
+ api_password: "{{ netapp_e_api_password }}"
+ ssid: "{{ netapp_e_ssid }}"
+ max_records: 5000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ force: yes
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+class AuditLog(object):
+ """Audit-log module configuration class."""
+ MAX_RECORDS = 50000
+ HEADERS = {"Content-Type": "application/json",
+ "Accept": "application/json"}
+
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ max_records=dict(type="int", default=50000),
+ log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]),
+ full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]),
+ threshold=dict(type="int", default=90),
+ force=dict(type="bool", default=False),
+ log_path=dict(type='str', required=False)))
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ args = self.module.params
+
+ self.max_records = args["max_records"]
+ if self.max_records < 100 or self.max_records > self.MAX_RECORDS:
+ self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]"
+ % self.max_records)
+ self.threshold = args["threshold"]
+ if self.threshold < 60 or self.threshold > 90:
+ self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold)
+ self.log_level = args["log_level"]
+ self.full_policy = args["full_policy"]
+ self.force = args["force"]
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ if not self.url.endswith('/'):
+ self.url += '/'
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ # logging setup
+ log_path = args['log_path']
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ self.proxy_used = self.is_proxy()
+ self._logger.info(self.proxy_used)
+ self.check_mode = self.module.check_mode
+
+ def is_proxy(self):
+ """Determine whether the API is embedded or proxy."""
+ try:
+
+ # replace http url path with devmgr/utils/about
+ about_url = list(urlparse(self.url))
+ about_url[2] = "devmgr/utils/about"
+ about_url = urlunparse(about_url)
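+            # e.g. https://192.168.1.100:8443/devmgr/v2/ becomes
+            # https://192.168.1.100:8443/devmgr/utils/about (illustrative address)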
+
+ rc, data = request(about_url, timeout=300, headers=self.HEADERS, **self.creds)
+
+ return data["runningAsProxy"]
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def get_configuration(self):
+ """Retrieve the existing audit-log configurations.
+
+ :returns: dictionary containing current audit-log configuration
+ """
+ try:
+ if self.proxy_used:
+ rc, data = request(self.url + "audit-log/config", timeout=300, headers=self.HEADERS, **self.creds)
+ else:
+ rc, data = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid,
+ timeout=300, headers=self.HEADERS, **self.creds)
+ return data
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve the audit-log configuration! "
+ "Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def build_configuration(self):
+ """Build audit-log expected configuration.
+
+ :returns: Tuple containing update boolean value and dictionary of audit-log configuration
+ """
+ config = self.get_configuration()
+
+ current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
+ auditLogLevel=config["auditLogLevel"],
+ auditLogFullPolicy=config["auditLogFullPolicy"],
+ auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])
+
+ body = dict(auditLogMaxRecords=self.max_records,
+ auditLogLevel=self.log_level,
+ auditLogFullPolicy=self.full_policy,
+ auditLogWarningThresholdPct=self.threshold)
+
+ update = current != body
+
+ self._logger.info(pformat(update))
+ self._logger.info(pformat(body))
+ return update, body
+
+ def delete_log_messages(self):
+ """Delete all audit-log messages."""
+ self._logger.info("Deleting audit-log messages...")
+ try:
+ if self.proxy_used:
+ rc, result = request(self.url + "audit-log?clearAll=True", timeout=300,
+ method="DELETE", headers=self.HEADERS, **self.creds)
+ else:
+ rc, result = request(self.url + "storage-systems/%s/audit-log?clearAll=True" % self.ssid, timeout=300,
+ method="DELETE", headers=self.HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update_configuration(self, update=None, body=None, attempt_recovery=True):
+ """Update audit-log configuration."""
+ if update is None or body is None:
+ update, body = self.build_configuration()
+
+ if update and not self.check_mode:
+ try:
+ if self.proxy_used:
+ rc, result = request(self.url + "storage-systems/audit-log/config", timeout=300,
+ data=json.dumps(body), method='POST', headers=self.HEADERS,
+ ignore_errors=True, **self.creds)
+ else:
+ rc, result = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, timeout=300,
+ data=json.dumps(body), method='POST', headers=self.HEADERS,
+ ignore_errors=True, **self.creds)
+
+ if rc == 422:
+ if self.force and attempt_recovery:
+ self.delete_log_messages()
+ update = self.update_configuration(update, body, False)
+ else:
+ self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(rc, result)))
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+ return update
+
+ def update(self):
+ """Update the audit-log configuration."""
+ update = self.update_configuration()
+ self.module.exit_json(msg="Audit-log update complete", changed=update)
+
+ def __call__(self):
+ self.update()
+
+
+def main():
+ auditlog = AuditLog()
+ auditlog()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py
new file mode 100644
index 000000000..ac5c14c06
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_auth
+short_description: NetApp E-Series set or update the password for a storage array.
+description:
+ - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
+      Services proxy. Note that not all storage arrays have a Monitor or RO role.
+version_added: "2.2"
+author: Kevin Hulquest (@hulquest)
+options:
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ name:
+ description:
+ - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
+ the ID instead.
+ required: False
+ type: str
+ ssid:
+ description:
+ - the identifier of the storage array in the Web Services Proxy.
+ required: False
+ type: str
+ set_admin:
+ description:
+ - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
+ type: bool
+ default: False
+ current_password:
+ description:
+ - The current admin password. This is not required if the password hasn't been set before.
+ required: False
+ type: str
+ new_password:
+ description:
+ - The password you would like to set. Cannot be more than 30 characters.
+ required: True
+ type: str
+ api_url:
+ description:
+ - The full API url.
+ - "Example: http://ENDPOINT:8080/devmgr/v2"
+ - This can optionally be set via an environment variable, API_URL
+ required: False
+ type: str
+ api_username:
+ description:
+ - The username used to authenticate against the API
+ - This can optionally be set via an environment variable, API_USERNAME
+ required: False
+ type: str
+ api_password:
+ description:
+ - The password used to authenticate against the API
+ - This can optionally be set via an environment variable, API_PASSWORD
+ required: False
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Test module
+ netapp_e_auth:
+ name: trex
+ current_password: OldPasswd
+ new_password: NewPasswd
+ set_admin: yes
+ api_url: '{{ netapp_api_url }}'
+ api_username: '{{ netapp_api_username }}'
+ api_password: '{{ netapp_api_password }}'
+'''
+
+RETURN = '''
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: "Password Updated Successfully"
+'''
+import json
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "x-netapp-password-validate-method": "none"
+
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as e:
+ r = e.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def get_ssid(module, name, api_url, user, pwd):
+ count = 0
+ all_systems = 'storage-systems'
+ systems_url = api_url + all_systems
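+    # The storage-systems listing returns one entry per array known to the proxy;
+    # each entry's 'name' and 'id' fields are used below to resolve the supplied
+    # name to an ssid.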
+ rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd,
+ validate_certs=module.validate_certs)
+ for system in data:
+ if system['name'] == name:
+ count += 1
+ if count > 1:
+ module.fail_json(
+ msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
+ "Use the id instead")
+ else:
+ ssid = system['id']
+ else:
+ continue
+
+ if count == 0:
+ module.fail_json(msg="No storage array with the name %s was found" % name)
+
+ else:
+ return ssid
+
+
+def get_pwd_status(module, ssid, api_url, user, pwd):
+ pwd_status = "storage-systems/%s/passwords" % ssid
+ url = api_url + pwd_status
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd,
+ validate_certs=module.validate_certs)
+ return data['readOnlyPasswordSet'], data['adminPasswordSet']
+ except HTTPError as e:
+ module.fail_json(msg="There was an issue with connecting, please check that your "
+ "endpoint is properly defined and your credentials are correct: %s" % to_native(e))
+
+
+def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
+ """Update the stored storage-system password"""
+ update_pwd = 'storage-systems/%s' % ssid
+ url = api_url + update_pwd
+ post_body = json.dumps(dict(storedPassword=pwd))
+ try:
+ rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
+ url_password=api_pwd, validate_certs=module.validate_certs)
+ return rc, data
+ except Exception as e:
+ module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e)))
+
+
+def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
+ """Set the storage-system password"""
+ set_pass = "storage-systems/%s/passwords" % ssid
+ url = api_url + set_pass
+
+ if not current_password:
+ current_password = ""
+
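+    # In this request body 'adminPassword' is a boolean selector: True updates the
+    # admin password, False updates the read-only (RO) password; the new password
+    # itself is carried in 'newPassword'.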
+ post_body = json.dumps(
+ dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
+
+ try:
+ rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
+ ignore_errors=True, validate_certs=module.validate_certs)
+ except Exception as e:
+ module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)),
+ exception=traceback.format_exc())
+
+ if rc == 422:
+ post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
+ try:
+ rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
+ validate_certs=module.validate_certs)
+ except Exception:
+ # TODO(lorenp): Resolve ignored rc, data
+ module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
+
+ if int(rc) >= 300:
+ module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. Error [%s]" % (ssid, rc, data))
+
+ rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
+
+ if int(rc) < 300:
+ return update_data
+ else:
+ module.fail_json(msg="%s:%s" % (rc, update_data))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=False, type='str'),
+ ssid=dict(required=False, type='str'),
+ current_password=dict(required=False, no_log=True),
+ new_password=dict(required=True, no_log=True),
+ set_admin=dict(required=True, type='bool'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
+ required_one_of=[['name', 'ssid']])
+
+ name = module.params['name']
+ ssid = module.params['ssid']
+ current_password = module.params['current_password']
+ new_password = module.params['new_password']
+ set_admin = module.params['set_admin']
+ user = module.params['api_username']
+ pwd = module.params['api_password']
+ api_url = module.params['api_url']
+ module.validate_certs = module.params['validate_certs']
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if name:
+ ssid = get_ssid(module, name, api_url, user, pwd)
+
+ ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
+
+ if admin_pwd and not current_password:
+ module.fail_json(
+ msg="Admin account has a password set. " +
+ "You must supply current_password in order to update the RO or Admin passwords")
+
+ if len(new_password) > 30:
+ module.fail_json(msg="Passwords must not be greater than 30 characters in length")
+
+ result = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
+ new_password=new_password, set_admin=set_admin)
+
+ module.exit_json(changed=True, msg="Password Updated Successfully",
+ password_set=result['passwordSet'],
+ password_status=result['passwordStatus'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py
new file mode 100644
index 000000000..e74bac776
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_drive_firmware
+version_added: "2.9"
+short_description: NetApp E-Series manage drive firmware
+description:
+ - Ensure drive firmware version is activated on specified drive model.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ firmware:
+ description:
+ - list of drive firmware file paths.
+ - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+ type: list
+ required: True
+ wait_for_completion:
+ description:
+      - This flag will cause the module to wait for any upgrade actions to complete.
+ type: bool
+ default: false
+ ignore_inaccessible_drives:
+ description:
+ - This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible.
+ type: bool
+ default: false
+ upgrade_drives_online:
+ description:
+      - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
+      - When I(upgrade_drives_online=False), stop all I/O before running the task.
+ type: bool
+ default: true
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  netapp_e_drive_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware: "path/to/drive_firmware"
+ wait_for_completion: true
+ ignore_inaccessible_drives: false
+"""
+RETURN = """
+msg:
+ description: Whether any drive firmware was upgraded and whether it is in progress.
+ type: str
+ returned: always
+ sample:
+ { changed: True, upgrade_in_process: True }
+"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
+class NetAppESeriesDriveFirmware(NetAppESeriesModule):
+ WAIT_TIMEOUT_SEC = 60 * 15
+
+ def __init__(self):
+ ansible_options = dict(
+ firmware=dict(type="list", required=True),
+ wait_for_completion=dict(type="bool", default=False),
+ ignore_inaccessible_drives=dict(type="bool", default=False),
+ upgrade_drives_online=dict(type="bool", default=True))
+
+ super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.firmware_list = args["firmware"]
+ self.wait_for_completion = args["wait_for_completion"]
+ self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
+ self.upgrade_drives_online = args["upgrade_drives_online"]
+
+ self.upgrade_list_cache = None
+
+ self.upgrade_required_cache = None
+ self.upgrade_in_progress = False
+ self.drive_info_cache = None
+
+ def upload_firmware(self):
+ """Ensure firmware has been upload prior to uploaded."""
+ for firmware in self.firmware_list:
+ firmware_name = os.path.basename(firmware)
+ files = [("file", firmware_name, firmware)]
+ headers, data = create_multipart_formdata(files)
+ try:
+ rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))
+
+ def upgrade_list(self):
+ """Determine whether firmware is compatible with the specified drives."""
+ if self.upgrade_list_cache is None:
+ self.upgrade_list_cache = list()
+ try:
+ rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
+
+ # Create upgrade list, this ensures only the firmware uploaded is applied
+ for firmware in self.firmware_list:
+ filename = os.path.basename(firmware)
+
+ for uploaded_firmware in response["compatibilities"]:
+ if uploaded_firmware["filename"] == filename:
+
+ # Determine whether upgrade is required
+ drive_reference_list = []
+ for drive in uploaded_firmware["compatibleDrives"]:
+ try:
+ rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
+
+ # Add drive references that are supported and differ from current firmware
+ if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
+ uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
+
+ if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
+ drive_reference_list.append(drive["driveRef"])
+
+ if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
+ self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
+ % (self.ssid, drive["driveRef"]))
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
+ % (self.ssid, drive["driveRef"], to_native(error)))
+
+ if drive_reference_list:
+ self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return self.upgrade_list_cache
+
+ def wait_for_upgrade_completion(self):
+ """Wait for drive firmware upgrade to complete."""
+ drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
+ last_status = None
+ for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)
+
+ # Check drive status
+ for status in response["driveStatus"]:
+ last_status = status
+ if status["driveRef"] in drive_references:
+ if status["status"] == "okay":
+ continue
+ elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
+ break
+ else:
+ self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
+ % (self.ssid, status["driveRef"], status["status"]))
+ else:
+ self.upgrade_in_progress = False
+ break
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ sleep(5)
+ else:
+ self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))
+
+ def upgrade(self):
+ """Apply firmware to applicable drives."""
+ try:
+ rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
+ % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
+ self.upgrade_in_progress = True
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ if self.wait_for_completion:
+ self.wait_for_upgrade_completion()
+
+ def apply(self):
+ """Apply firmware policy has been enforced on E-Series storage system."""
+ self.upload_firmware()
+
+ if self.upgrade_list() and not self.module.check_mode:
+ self.upgrade()
+
+ self.module.exit_json(changed=True if self.upgrade_list() else False,
+ upgrade_in_process=self.upgrade_in_progress)
+
+
+def main():
+ drive_firmware = NetAppESeriesDriveFirmware()
+ drive_firmware.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py
new file mode 100644
index 000000000..3734a477e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py
@@ -0,0 +1,530 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: netapp_e_facts
+short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+description:
+ - The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
+version_added: '2.2'
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+'''
+
+EXAMPLES = """
+---
+- name: Get array facts
+ netapp_e_facts:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+"""
+
+RETURN = """
+ msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample:
+ - Gathered facts for storage array. Array ID [1].
+ - Gathered facts for web services proxy.
+ storage_array_facts:
+ description: provides details about the array, controllers, management interfaces, hostside interfaces,
+ driveside interfaces, disks, storage pools, volumes, snapshots, and features.
+    returned: on successful inquiry from the embedded web services rest api
+ type: complex
+ contains:
+ netapp_controllers:
+ description: storage array controller list that contains basic controller identification and status
+ type: complex
+ sample:
+ - [{"name": "A", "serial": "021632007299", "status": "optimal"},
+ {"name": "B", "serial": "021632007300", "status": "failed"}]
+ netapp_disks:
+ description: drive list that contains identification, type, and status information for each drive
+ type: complex
+ sample:
+ - [{"available": false,
+ "firmware_version": "MS02",
+ "id": "01000000500003960C8B67880000000000000000",
+ "media_type": "ssd",
+ "product_id": "PX02SMU080 ",
+ "serial_number": "15R0A08LT2BA",
+ "status": "optimal",
+ "tray_ref": "0E00000000000000000000000000000000000000",
+ "usable_bytes": "799629205504" }]
+ netapp_driveside_interfaces:
+ description: drive side interface list that contains identification, type, and speed for each interface
+ type: complex
+ sample:
+ - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
+ - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
+ netapp_enabled_features:
+ description: specifies the enabled features on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
+ netapp_host_groups:
+ description: specifies the host groups on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
+ netapp_hosts:
+ description: specifies the hosts on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "8203800000000000000000000000000000000000",
+ "name": "host1",
+ "group_id": "85000000600A098000A4B28D003610705C40B964",
+ "host_type_index": 28,
+ "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
+ { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
+ netapp_host_types:
+ description: lists the available host types on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "index": 0, "type": "FactoryDefault" },
+ { "index": 1, "type": "W2KNETNCL"},
+ { "index": 2, "type": "SOL" },
+ { "index": 5, "type": "AVT_4M" },
+ { "index": 6, "type": "LNX" },
+ { "index": 7, "type": "LnxALUA" },
+ { "index": 8, "type": "W2KNETCL" },
+ { "index": 9, "type": "AIX MPIO" },
+ { "index": 10, "type": "VmwTPGSALUA" },
+ { "index": 15, "type": "HPXTPGS" },
+ { "index": 17, "type": "SolTPGSALUA" },
+ { "index": 18, "type": "SVC" },
+ { "index": 22, "type": "MacTPGSALUA" },
+ { "index": 23, "type": "WinTPGSALUA" },
+ { "index": 24, "type": "LnxTPGSALUA" },
+ { "index": 25, "type": "LnxTPGSALUA_PM" },
+ { "index": 26, "type": "ONTAP_ALUA" },
+ { "index": 27, "type": "LnxTPGSALUA_SF" },
+ { "index": 28, "type": "LnxDHALUA" },
+ { "index": 29, "type": "ATTOClusterAllOS" }]
+ netapp_hostside_interfaces:
+ description: host side interface list that contains identification, configuration, type, speed, and
+ status information for each interface
+ type: complex
+ sample:
+ - [{"iscsi":
+ [{ "controller": "A",
+ "current_interface_speed": "10g",
+ "ipv4_address": "10.10.10.1",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.10.10.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
+ "link_status": "up",
+ "mtu": 9000,
+ "supported_interface_speeds": [ "10g" ] }]}]
+ netapp_management_interfaces:
+ description: management interface list that contains identification, configuration, and status for
+ each interface
+ type: complex
+ sample:
+ - [{"alias": "ict-2800-A",
+ "channel": 1,
+ "controller": "A",
+ "dns_config_method": "dhcp",
+ "dns_servers": [],
+ "ipv4_address": "10.1.1.1",
+ "ipv4_address_config_method": "static",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.113.1.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "link_status": "up",
+ "mac_address": "00A098A81B5D",
+ "name": "wan0",
+ "ntp_config_method": "disabled",
+ "ntp_servers": [],
+ "remote_ssh_access": false }]
+ netapp_storage_array:
+ description: provides storage array identification, firmware version, and available capabilities
+ type: dict
+ sample:
+ - {"chassis_serial": "021540006043",
+ "firmware": "08.40.00.01",
+ "name": "ict-2800-11_40",
+ "wwn": "600A098000A81B5D0000000059D60C76",
+ "cacheBlockSizes": [4096,
+ 8192,
+ 16384,
+ 32768],
+ "supportedSegSizes": [8192,
+ 16384,
+ 32768,
+ 65536,
+ 131072,
+ 262144,
+ 524288]}
+ netapp_storage_pools:
+ description: storage pool list that contains identification and capacity information for each pool
+ type: complex
+ sample:
+ - [{"available_capacity": "3490353782784",
+ "id": "04000000600A098000A81B5D000002B45A953A61",
+ "name": "Raid6",
+ "total_capacity": "5399466745856",
+ "used_capacity": "1909112963072" }]
+ netapp_volumes:
+ description: storage volume list that contains identification and capacity information for each volume
+ type: complex
+ sample:
+ - [{"capacity": "5368709120",
+ "id": "02000000600A098000AAC0C3000002C45A952BAA",
+ "is_thin_provisioned": false,
+ "name": "5G",
+ "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
+ netapp_workload_tags:
+ description: workload tag list
+ type: complex
+ sample:
+ - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
+ "name": "ftp_server",
+ "workloadAttributes": [{"key": "use",
+ "value": "general"}]}]
+ netapp_volumes_by_initiators:
+ description: list of available volumes keyed by the mapped initiators.
+ type: complex
+ sample:
+ - {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
+ "meta_data": {"filetype": "xfs", "public": true},
+ "name": "some_volume",
+ "workload_name": "test2_volumes",
+ "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
+ snapshot_images:
+ description: snapshot image list that contains identification, capacity, and status information for each
+ snapshot image
+ type: complex
+ sample:
+ - [{"active_cow": true,
+ "creation_method": "user",
+ "id": "34000000600A098000A81B5D00630A965B0535AC",
+ "pit_capacity": "5368709120",
+ "reposity_cap_utilization": "0",
+ "rollback_source": false,
+ "status": "optimal" }]
+"""
+
+from re import match
+from pprint import pformat
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+
+
+class Facts(NetAppESeriesModule):
+ def __init__(self):
+ web_services_version = "02.00.0000.0000"
+ super(Facts, self).__init__(ansible_options={},
+ web_services_version=web_services_version,
+ supports_check_mode=True)
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller references to their labels."""
+ controllers = list()
+ try:
+ rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+ % (self.ssid, str(err)))
+
+ controllers.sort()
+
+ controllers_dict = {}
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[controller] = label
+ i += 1
+
+ return controllers_dict
+
+ def get_array_facts(self):
+ """Extract particular facts from the storage array graph"""
+ facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
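+        # facts_from_proxy indicates whether the data was gathered through the Web
+        # Services Proxy rather than the controller's embedded REST API.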
+ controller_reference_label = self.get_controllers()
+ array_facts = None
+
+ # Get the storage array graph
+ try:
+ rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
+
+ facts['netapp_storage_array'] = dict(
+ name=array_facts['sa']['saData']['storageArrayLabel'],
+ chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
+ firmware=array_facts['sa']['saData']['fwVersion'],
+ wwn=array_facts['sa']['saData']['saId']['worldWideName'],
+ segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
+ cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
+
+ facts['netapp_controllers'] = [
+ dict(
+ name=controller_reference_label[controller['controllerRef']],
+ serial=controller['serialNumber'].strip(),
+ status=controller['status'],
+ ) for controller in array_facts['controller']]
+
+ facts['netapp_host_groups'] = [
+ dict(
+ id=group['id'],
+ name=group['name']
+ ) for group in array_facts['storagePoolBundle']['cluster']]
+
+ facts['netapp_hosts'] = [
+ dict(
+ group_id=host['clusterRef'],
+ hosts_reference=host['hostRef'],
+ id=host['id'],
+ name=host['name'],
+ host_type_index=host['hostTypeIndex'],
+                ports=host['hostSidePorts']
+ ) for host in array_facts['storagePoolBundle']['host']]
+
+ facts['netapp_host_types'] = [
+ dict(
+ type=host_type['hostType'],
+ index=host_type['index']
+ ) for host_type in array_facts['sa']['hostSpecificVals']
+ if 'hostType' in host_type.keys() and host_type['hostType']
+ # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
+ ]
+ facts['snapshot_images'] = [
+ dict(
+ id=snapshot['id'],
+ status=snapshot['status'],
+ pit_capacity=snapshot['pitCapacity'],
+ creation_method=snapshot['creationMethod'],
+ reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
+ active_cow=snapshot['activeCOW'],
+ rollback_source=snapshot['isRollbackSource']
+ ) for snapshot in array_facts['highLevelVolBundle']['pit']]
+
+ facts['netapp_disks'] = [
+ dict(
+ id=disk['id'],
+ available=disk['available'],
+ media_type=disk['driveMediaType'],
+ status=disk['status'],
+ usable_bytes=disk['usableCapacity'],
+ tray_ref=disk['physicalLocation']['trayRef'],
+ product_id=disk['productID'],
+ firmware_version=disk['firmwareVersion'],
+ serial_number=disk['serialNumber'].lstrip()
+ ) for disk in array_facts['drive']]
+
+ facts['netapp_management_interfaces'] = [
+ dict(controller=controller_reference_label[controller['controllerRef']],
+ name=iface['ethernet']['interfaceName'],
+ alias=iface['ethernet']['alias'],
+ channel=iface['ethernet']['channel'],
+ mac_address=iface['ethernet']['macAddr'],
+ remote_ssh_access=iface['ethernet']['rloginEnabled'],
+ link_status=iface['ethernet']['linkStatus'],
+ ipv4_enabled=iface['ethernet']['ipv4Enabled'],
+ ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
+ ipv4_address=iface['ethernet']['ipv4Address'],
+ ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
+ ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
+ ipv6_enabled=iface['ethernet']['ipv6Enabled'],
+ dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
+ dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
+ if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
+ ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
+ ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
+ if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
+ ) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
+
+ facts['netapp_hostside_interfaces'] = [
+ dict(
+ fc=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['fibre']['channel'],
+ link_status=iface['fibre']['linkStatus'],
+ current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'fc'],
+ ib=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['ib']['channel'],
+ link_status=iface['ib']['linkState'],
+ mtu=iface['ib']['maximumTransmissionUnit'],
+ current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'ib'],
+ iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
+ iqn=iface['iscsi']['iqn'],
+ link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
+ ipv4_enabled=iface['iscsi']['ipv4Enabled'],
+ ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
+ ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
+ ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
+ ipv6_enabled=iface['iscsi']['ipv6Enabled'],
+ mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
+ current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
+ ['ethernetData']['currentInterfaceSpeed']),
+ supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
+ ['ethernetData']
+ ['supportedInterfaceSpeeds']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'iscsi'],
+ sas=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['sas']['channel'],
+ current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
+ link_status=iface['sas']['iocPort']['state'])
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'sas'])]
+
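+ # Drive-side interface speeds come from different fields depending on the interface type: SAS/SATA/fibre expose maximumInterfaceSpeed,
+ # IB exposes currentSpeed, iSCSI nests maximumInterfaceSpeed under interfaceData, and anything else is reported as 'unknown'.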
+ facts['netapp_driveside_interfaces'] = [
+ dict(
+ controller=controller_reference_label[controller['controllerRef']],
+ interface_type=interface['interfaceType'],
+ interface_speed=strip_interface_speed(
+ interface[interface['interfaceType']]['maximumInterfaceSpeed']
+ if (interface['interfaceType'] == 'sata' or
+ interface['interfaceType'] == 'sas' or
+ interface['interfaceType'] == 'fibre')
+ else (
+ interface[interface['interfaceType']]['currentSpeed']
+ if interface['interfaceType'] == 'ib'
+ else (
+ interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
+ if interface['interfaceType'] == 'iscsi' else 'unknown'
+ ))),
+ )
+ for controller in array_facts['controller']
+ for interface in controller['driveInterfaces']]
+
+ facts['netapp_storage_pools'] = [
+ dict(
+ id=storage_pool['id'],
+ name=storage_pool['name'],
+ available_capacity=storage_pool['freeSpace'],
+ total_capacity=storage_pool['totalRaidedSpace'],
+ used_capacity=storage_pool['usedSpace']
+ ) for storage_pool in array_facts['volumeGroup']]
+
+ all_volumes = list(array_facts['volume'])
+
+ facts['netapp_volumes'] = [
+ dict(
+ id=v['id'],
+ name=v['name'],
+ parent_storage_pool_id=v['volumeGroupRef'],
+ capacity=v['capacity'],
+ is_thin_provisioned=v['thinProvisioned'],
+ workload=v['metadata'],
+ ) for v in all_volumes]
+
+ workload_tags = None
+ try:
+ rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
+
+ facts['netapp_workload_tags'] = [
+ dict(
+ id=workload_tag['id'],
+ name=workload_tag['name'],
+ attributes=workload_tag['workloadAttributes']
+ ) for workload_tag in workload_tags]
+
+ # Create a dictionary of volume lists keyed by host names
+ facts['netapp_volumes_by_initiators'] = dict()
+ for mapping in array_facts['storagePoolBundle']['lunMapping']:
+ for host in facts['netapp_hosts']:
+ if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
+ if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
+ facts['netapp_volumes_by_initiators'].update({host['name']: []})
+
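+ # Record each volume exposed through this LUN mapping, resolving its workload name and attributes from the workload tags gathered above.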
+ for volume in all_volumes:
+ if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
+
+ # Determine workload name if there is one
+ workload_name = ""
+ metadata = dict()
+ for volume_tag in volume['metadata']:
+ if volume_tag['key'] == 'workloadId':
+ for workload_tag in facts['netapp_workload_tags']:
+ if volume_tag['value'] == workload_tag['id']:
+ workload_name = workload_tag['name']
+ metadata = dict((entry['key'], entry['value'])
+ for entry in workload_tag['attributes']
+ if entry['key'] != 'profileId')
+
+ facts['netapp_volumes_by_initiators'][host['name']].append(
+ dict(name=volume['name'],
+ id=volume['id'],
+ wwn=volume['wwn'],
+ workload_name=workload_name,
+ meta_data=metadata))
+
+ features = [feature for feature in array_facts['sa']['capabilities']]
+ features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
+ if feature['isEnabled']])
+ features = list(set(features)) # ensure unique
+ features.sort()
+ facts['netapp_enabled_features'] = features
+
+ return facts
+
+ def get_facts(self):
+ """Get the embedded or web services proxy information."""
+ facts = self.get_array_facts()
+
+ self.module.log("isEmbedded: %s" % self.is_embedded())
+ self.module.log(pformat(facts))
+
+ self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
+ storage_array_facts=facts)
+
+
+def strip_interface_speed(speed):
+ """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
+ if isinstance(speed, list):
+ result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
+ result = [sp.group().replace("speed", "") if result else "unknown" for sp in result if sp]
+ result = ["auto" if match(r"auto", sp) else sp for sp in result]
+ else:
+ result = match(r"speed[0-9]{1,3}[gm]", speed)
+ result = result.group().replace("speed", "") if result else "unknown"
+ result = "auto" if match(r"auto", result.lower()) else result
+ return result
+
+
+def main():
+ facts = Facts()
+ facts.get_facts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py
new file mode 100644
index 000000000..c2f7f7457
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_firmware
+version_added: "2.9"
+short_description: NetApp E-Series manage firmware.
+description:
+ - Ensure specific firmware versions are activated on the E-Series storage system.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ nvsram:
+ description:
+ - Path to the NVSRAM file.
+ type: str
+ required: true
+ firmware:
+ description:
+ - Path to the firmware file.
+ type: str
+ required: true
+ wait_for_completion:
+ description:
+ - This flag will cause the module to wait for any upgrade actions to complete.
+ type: bool
+ default: false
+ ignore_health_check:
+ description:
+ - This flag will force firmware to be activated in spite of the health check.
+ - Use at your own risk. Certain non-optimal states could result in data loss.
+ type: bool
+ default: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ netapp_e_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/firmware"
+ wait_for_completion: true
+- name: Ensure correct firmware versions
+ netapp_e_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/firmware"
+"""
+RETURN = """
+msg:
+ description: Status and version of firmware and NVSRAM.
+ type: str
+ returned: always
+ sample:
+"""
+import os
+
+from time import sleep
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
+class NetAppESeriesFirmware(NetAppESeriesModule):
+ HEALTH_CHECK_TIMEOUT_MS = 120000
+ REBOOT_TIMEOUT_SEC = 15 * 60
+ FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
+ DEFAULT_TIMEOUT = 60 * 15 # This will override the NetAppESeriesModule request method timeout.
+
+ def __init__(self):
+ ansible_options = dict(
+ nvsram=dict(type="str", required=True),
+ firmware=dict(type="str", required=True),
+ wait_for_completion=dict(type="bool", default=False),
+ ignore_health_check=dict(type="bool", default=False))
+
+ super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.nvsram = args["nvsram"]
+ self.firmware = args["firmware"]
+ self.wait_for_completion = args["wait_for_completion"]
+ self.ignore_health_check = args["ignore_health_check"]
+
+ self.nvsram_name = None
+ self.firmware_name = None
+ self.is_bundle_cache = None
+ self.firmware_version_cache = None
+ self.nvsram_version_cache = None
+ self.upgrade_required = False
+ self.upgrade_in_progress = False
+ self.module_info = dict()
+
+ self.nvsram_name = os.path.basename(self.nvsram)
+ self.firmware_name = os.path.basename(self.firmware)
+
+ def is_firmware_bundled(self):
+ """Determine whether supplied firmware is bundle."""
+ if self.is_bundle_cache is None:
+ with open(self.firmware, "rb") as fh:
+ signature = fh.read(16).lower()
+
+ if b"firmware" in signature:
+ self.is_bundle_cache = False
+ elif b"combined_content" in signature:
+ self.is_bundle_cache = True
+ else:
+ self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
+
+ return self.is_bundle_cache
+
+ def firmware_version(self):
+ """Retrieve firmware version of the firmware file. Return: bytes string"""
+ if self.firmware_version_cache is None:
+
+ # Search firmware file for bundle or firmware version
+ with open(self.firmware, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if self.is_firmware_bundled():
+ if b'displayableAttributeList=' in line:
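+ # Each entry on this line is a KEY|VALUE pair; the VERSION entry carries the bundled firmware version.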
+ for item in line[25:].split(b','):
+ key, value = item.split(b"|")
+ if key == b'VERSION':
+ self.firmware_version_cache = value.strip(b"\n")
+ break
+ elif b"Version:" in line:
+ self.firmware_version_cache = line.split()[-1].strip(b"\n")
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
+ return self.firmware_version_cache
+
+ def nvsram_version(self):
+ """Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
+ if self.nvsram_version_cache is None:
+
+ with open(self.nvsram, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if b".NVSRAM Configuration Number" in line:
+ self.nvsram_version_cache = line.split(b'"')[-2]
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
+ return self.nvsram_version_cache
+
+ def check_system_health(self):
+ """Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
+ try:
+ rc, request_id = self.request("health-check", method="POST", data={"onlineOnly": True, "storageDeviceIds": [self.ssid]})
+
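+ # Poll the health-check request until it completes or its processing time exceeds HEALTH_CHECK_TIMEOUT_MS.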
+ while True:
+ sleep(1)
+
+ try:
+ rc, response = self.request("health-check?requestId=%s" % request_id["requestId"])
+
+ if not response["healthCheckRunning"]:
+ return response["results"][0]["successful"]
+ elif int(response["results"][0]["processingTimeMS"]) > self.HEALTH_CHECK_TIMEOUT_MS:
+ self.module.fail_json(msg="Health check failed to complete. Array Id [%s]." % self.ssid)
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate health check. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % self.ssid)
+
+ def embedded_check_compatibility(self):
+ """Verify files are compatible with E-Series storage system."""
+ self.embedded_check_nvsram_compatibility()
+ self.embedded_check_bundle_compatibility()
+
+ def embedded_check_nvsram_compatibility(self):
+ """Verify the provided NVSRAM is compatible with E-Series storage system."""
+
+ # Check nvsram compatibility
+ try:
+ files = [("nvsramimage", self.nvsram_name, self.nvsram)]
+ headers, data = create_multipart_formdata(files=files)
+
+ rc, nvsram_compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid,
+ method="POST", data=data, headers=headers)
+
+ if not nvsram_compatible["signatureTestingPassed"]:
+ self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
+ if not nvsram_compatible["fileCompatible"]:
+ self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
+
+ # Determine whether nvsram is required
+ for module in nvsram_compatible["versionContents"]:
+ if module["bundledVersion"] != module["onboardVersion"]:
+ self.upgrade_required = True
+
+ # Update bundle info
+ self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ def embedded_check_bundle_compatibility(self):
+ """Verify the provided firmware bundle is compatible with E-Series storage system."""
+ try:
+ files = [("files[]", "blob", self.firmware)]
+ headers, data = create_multipart_formdata(files=files, send_8kb=True)
+ rc, bundle_compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid,
+ method="POST", data=data, headers=headers)
+
+ # Determine whether valid and compatible firmware
+ if not bundle_compatible["signatureTestingPassed"]:
+ self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
+ if not bundle_compatible["fileCompatible"]:
+ self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
+
+ # Determine whether upgrade is required
+ for module in bundle_compatible["versionContents"]:
+
+ bundle_module_version = module["bundledVersion"].split(".")
+ onboard_module_version = module["onboardVersion"].split(".")
+ version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
+ if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
+ self.upgrade_required = True
+
+ # Check whether downgrade is being attempted
+ bundle_version = module["bundledVersion"].split(".")[:2]
+ onboard_version = module["onboardVersion"].split(".")[:2]
+ if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
+ self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
+ % (module["onboardVersion"], module["bundledVersion"]))
+
+ # Update bundle info
+ self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ def embedded_wait_for_upgrade(self):
+ """Wait for SANtricity Web Services Embedded to be available after reboot."""
+ for count in range(0, self.REBOOT_TIMEOUT_SEC):
+ try:
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData" % self.ssid)
+ bundle_display = [m["versionString"] for m in response[0]["extendedSAData"]["codeVersions"] if m["codeModule"] == "bundleDisplay"][0]
+ if rc == 200 and six.b(bundle_display) == self.firmware_version() and six.b(response[0]["nvsramVersion"]) == self.nvsram_version():
+ self.upgrade_in_progress = False
+ break
+ except Exception as error:
+ pass
+ sleep(1)
+ else:
+ self.module.fail_json(msg="Timeout waiting for Santricity Web Services Embedded. Array [%s]" % self.ssid)
+
+ def embedded_upgrade(self):
+ """Upload and activate both firmware and NVSRAM."""
+ files = [("nvsramfile", self.nvsram_name, self.nvsram),
+ ("dlpfile", self.firmware_name, self.firmware)]
+ headers, data = create_multipart_formdata(files=files)
+ try:
+ rc, response = self.request("firmware/embedded-firmware?staged=false&nvsram=true", method="POST", data=data, headers=headers)
+ self.upgrade_in_progress = True
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload and activate firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+ if self.wait_for_completion:
+ self.embedded_wait_for_upgrade()
+
+ def proxy_check_nvsram_compatibility(self):
+ """Verify nvsram is compatible with E-Series storage system."""
+ data = {"storageDeviceIds": [self.ssid]}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
+ for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
+ sleep(5)
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ if not response["checkRunning"]:
+ for result in response["results"][0]["nvsramFiles"]:
+ if result["filename"] == self.nvsram_name:
+ return
+ self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def proxy_check_firmware_compatibility(self):
+ """Verify firmware is compatible with E-Series storage system."""
+ data = {"storageDeviceIds": [self.ssid]}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
+ for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
+ sleep(5)
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ if not response["checkRunning"]:
+ for result in response["results"][0]["cfwFiles"]:
+ if result["filename"] == self.firmware_name:
+ return
+ self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid))
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def proxy_upload_and_check_compatibility(self):
+ """Ensure firmware is uploaded and verify compatibility."""
+ try:
+ rc, cfw_files = self.request("firmware/cfw-files")
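+ # Upload the NVSRAM and firmware files only when the proxy does not already have them; each for/else falls through to the upload when no matching filename is found.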
+ for file in cfw_files:
+ if file["filename"] == self.nvsram_name:
+ break
+ else:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", self.nvsram_name, self.nvsram)]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
+ % (self.nvsram_name, self.ssid, to_native(error)))
+
+ self.proxy_check_nvsram_compatibility()
+
+ for file in cfw_files:
+ if file["filename"] == self.firmware_name:
+ break
+ else:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", self.firmware_name, self.firmware)]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
+ % (self.firmware_name, self.ssid, to_native(error)))
+
+ self.proxy_check_firmware_compatibility()
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve existing existing firmware files. Error [%s]" % to_native(error))
+
+ def proxy_check_upgrade_required(self):
+ """Staging is required to collect firmware information from the web services proxy."""
+ # Verify controller consistency and get firmware versions
+ try:
+ # Retrieve current bundle version
+ if self.is_firmware_bundled():
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
+ current_firmware_version = six.b(response[0]["versionString"])
+ else:
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
+ current_firmware_version = six.b(response[0])
+
+ # Determine whether upgrade is required
+ if current_firmware_version != self.firmware_version():
+
+ current = current_firmware_version.split(b".")[:2]
+ upgrade = self.firmware_version().split(b".")[:2]
+ if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]):
+ self.upgrade_required = True
+ else:
+ self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+ # Determine current NVSRAM version and whether change is required
+ try:
+ rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
+ if six.b(response[0]) != self.nvsram_version():
+ self.upgrade_required = True
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ def proxy_wait_for_upgrade(self, request_id):
+ """Wait for SANtricity Web Services Proxy to report upgrade complete"""
+ if self.is_firmware_bundled():
+ while True:
+ try:
+ sleep(5)
+ rc, response = self.request("batch/cfw-upgrade/%s" % request_id)
+
+ if response["status"] == "complete":
+ self.upgrade_in_progress = False
+ break
+ elif response["status"] in ["failed", "cancelled"]:
+ self.module.fail_json(msg="Firmware upgrade failed to complete. Array [%s]." % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve firmware upgrade status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ else:
+ for count in range(0, int(self.REBOOT_TIMEOUT_SEC / 5)):
+ try:
+ sleep(5)
+ rc_firmware, firmware = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
+ rc_nvsram, nvsram = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
+
+ if six.b(firmware[0]) == self.firmware_version() and six.b(nvsram[0]) == self.nvsram_version():
+ self.upgrade_in_progress = False
+ break
+ except Exception as error:
+ pass
+ else:
+ self.module.fail_json(msg="Timed out waiting for firmware upgrade to complete. Array [%s]." % self.ssid)
+
+ def proxy_upgrade(self):
+ """Activate previously uploaded firmware related files."""
+ request_id = None
+ if self.is_firmware_bundled():
+ data = {"activate": True,
+ "firmwareFile": self.firmware_name,
+ "nvsramFile": self.nvsram_name,
+ "systemInfos": [{"systemId": self.ssid,
+ "allowNonOptimalActivation": self.ignore_health_check}]}
+ try:
+ rc, response = self.request("batch/cfw-upgrade", method="POST", data=data)
+ request_id = response["requestId"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ else:
+ data = {"stageFirmware": False,
+ "skipMelCheck": self.ignore_health_check,
+ "cfwFile": self.firmware_name,
+ "nvsramFile": self.nvsram_name}
+ try:
+ rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=data)
+ request_id = response["requestId"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.upgrade_in_progress = True
+ if self.wait_for_completion:
+ self.proxy_wait_for_upgrade(request_id)
+
+ def apply(self):
+ """Upgrade controller firmware."""
+ self.check_system_health()
+
+ # Verify firmware compatibility and whether changes are required
+ if self.is_embedded():
+ self.embedded_check_compatibility()
+ else:
+ self.proxy_check_upgrade_required()
+
+ # This will upload the firmware files to the web services proxy but not to the controller
+ if self.upgrade_required:
+ self.proxy_upload_and_check_compatibility()
+
+ # Perform upgrade
+ if self.upgrade_required and not self.module.check_mode:
+ if self.is_embedded():
+ self.embedded_upgrade()
+ else:
+ self.proxy_upgrade()
+
+ self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, status=self.module_info)
+
+
+def main():
+ firmware = NetAppESeriesFirmware()
+ firmware.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py
new file mode 100644
index 000000000..3ffacedda
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py
@@ -0,0 +1,442 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: netapp_e_flashcache
+author: Kevin Hulquest (@hulquest)
+version_added: '2.2'
+short_description: NetApp E-Series manage SSD caches
+description:
+- Create or remove SSD caches on a NetApp E-Series storage array.
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ required: true
+ type: str
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ type: str
+ description:
+ - Whether the specified SSD cache should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ name:
+ required: true
+ type: str
+ description:
+ - The name of the SSD cache to manage
+ io_type:
+ description:
+ - The type of workload to optimize the cache for.
+ choices: ['filesystem','database','media']
+ default: filesystem
+ type: str
+ disk_count:
+ type: int
+ description:
+ - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place
+ disk_refs:
+ description:
+ - List of disk references
+ type: list
+ size_unit:
+ description:
+ - The unit to be applied to size arguments
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: gb
+ type: str
+ cache_size_min:
+ description:
+ - The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
+ type: int
+ criteria_disk_phy_type:
+ description:
+ - Type of physical disk
+ choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
+ type: str
+ log_mode:
+ type: str
+ description:
+ - Log mode
+ log_path:
+ type: str
+ description:
+ - Log path
+'''
+
+EXAMPLES = """
+ - name: Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: json for newly created flash cache
+"""
+import json
+import logging
+import sys
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import reduce
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
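+ """Issue an HTTP request via open_url and return (status code, parsed JSON body); responses of 400 or higher raise unless ignore_errors is set."""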
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class NetAppESeriesFlashCache(object):
+ def __init__(self):
+ self.name = None
+ self.log_mode = None
+ self.log_path = None
+ self.api_url = None
+ self.api_username = None
+ self.api_password = None
+ self.ssid = None
+ self.validate_certs = None
+ self.disk_count = None
+ self.size_unit = None
+ self.cache_size_min = None
+ self.io_type = None
+ self.driveRefs = None
+ self.state = None
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ ssid=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ disk_count=dict(type='int'),
+ disk_refs=dict(type='list'),
+ cache_size_min=dict(type='int'),
+ io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
+ size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+ type='str'),
+ criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
+ type='str'),
+ log_mode=dict(type='str'),
+ log_path=dict(type='str'),
+ ))
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+
+ ],
+ mutually_exclusive=[
+
+ ],
+ # TODO: update validation for various selection criteria
+ supports_check_mode=True
+ )
+
+ self.__dict__.update(self.module.params)
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ self.debug = self._logger.debug
+
+ if self.log_mode == 'file' and self.log_path:
+ logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
+ elif self.log_mode == 'stderr':
+ logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
+
+ self.post_headers = dict(Accept="application/json")
+ self.post_headers['Content-Type'] = 'application/json'
+
+ def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
+ self.debug("getting candidate disks...")
+
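+ # Request candidate SSD drives that satisfy the requested drive count and, when provided, the minimum usable capacity.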
+ drives_req = dict(
+ driveCount=disk_count,
+ sizeUnit=size_unit,
+ driveType='ssd',
+ )
+
+ if capacity:
+ drives_req['targetUsableCapacity'] = capacity
+
+ (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
+ data=json.dumps(drives_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ if rc == 204:
+ self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')
+
+ disk_ids = [d['id'] for d in drives_resp]
+ bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)
+
+ return (disk_ids, bytes)
+
+ def create_cache(self):
+ (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
+ capacity=self.cache_size_min)
+
+ self.debug("creating ssd cache...")
+
+ create_fc_req = dict(
+ driveRefs=disk_ids,
+ name=self.name
+ )
+
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+ data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def update_cache(self):
+ self.debug('updating flash cache config...')
+ update_fc_req = dict(
+ name=self.name,
+ configType=self.io_type
+ )
+
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
+ data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def delete_cache(self):
+ self.debug('deleting flash cache...')
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs, ignore_errors=True)
+
+ @property
+ def needs_more_disks(self):
+ if len(self.cache_detail['driveRefs']) < self.disk_count:
+ self.debug("needs resize: current disk count %s < requested requested count %s",
+ len(self.cache_detail['driveRefs']), self.disk_count)
+ return True
+
+ @property
+ def needs_less_disks(self):
+ if len(self.cache_detail['driveRefs']) > self.disk_count:
+ self.debug("needs resize: current disk count %s < requested requested count %s",
+ len(self.cache_detail['driveRefs']), self.disk_count)
+ return True
+
+ @property
+ def current_size_bytes(self):
+ return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])
+
+ @property
+ def requested_size_bytes(self):
+ if self.cache_size_min:
+ return self.cache_size_min * self._size_unit_map[self.size_unit]
+ else:
+ return 0
+
+ @property
+ def needs_more_capacity(self):
+ if self.current_size_bytes < self.requested_size_bytes:
+ self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
+ self.current_size_bytes, self.requested_size_bytes)
+ return True
+
+ @property
+ def needs_resize(self):
+ return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks
+
+ def resize_cache(self):
+ # increase up to disk count first, then iteratively add disks until we meet requested capacity
+
+ # TODO: perform this calculation in check mode
+ current_disk_count = len(self.cache_detail['driveRefs'])
+ proposed_new_disks = 0
+
+ proposed_additional_bytes = 0
+ proposed_disk_ids = []
+
+ if self.needs_more_disks:
+ proposed_disk_count = self.disk_count - current_disk_count
+
+ (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
+ proposed_additional_bytes = bytes
+ proposed_disk_ids = disk_ids
+
+ while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
+ proposed_new_disks += 1
+ (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
+ proposed_disk_ids = disk_ids
+ proposed_additional_bytes = bytes
+
+ add_drives_req = dict(
+ driveRef=proposed_disk_ids
+ )
+
+ self.debug("adding drives to flash-cache...")
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
+ data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ elif self.needs_less_disks and self.driveRefs:
+ rm_drives = dict(driveRef=self.driveRefs)
+ (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
+ data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs)
+
+ def apply(self):
+ result = dict(changed=False)
+ (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
+ url_username=self.api_username, url_password=self.api_password,
+ validate_certs=self.validate_certs, ignore_errors=True)
+
+ if rc == 200:
+ self.cache_detail = cache_resp
+ else:
+ self.cache_detail = None
+
+ if rc not in [200, 404]:
+ raise Exception(
+ "Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))
+
+ if self.state == 'present':
+ if self.cache_detail:
+ # TODO: verify parameters against detail for changes
+ if self.cache_detail['name'] != self.name:
+ self.debug("CHANGED: name differs")
+ result['changed'] = True
+ if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
+ self.debug("CHANGED: io_type differs")
+ result['changed'] = True
+ if self.needs_resize:
+ self.debug("CHANGED: resize required")
+ result['changed'] = True
+ else:
+ self.debug("CHANGED: requested state is 'present' but cache does not exist")
+ result['changed'] = True
+ else: # requested state is absent
+ if self.cache_detail:
+ self.debug("CHANGED: requested state is 'absent' but cache exists")
+ result['changed'] = True
+
+ if not result['changed']:
+ self.debug("no changes, exiting...")
+ self.module.exit_json(**result)
+
+ if self.module.check_mode:
+ self.debug("changes pending in check mode, exiting early...")
+ self.module.exit_json(**result)
+
+ if self.state == 'present':
+ if not self.cache_detail:
+ self.create_cache()
+ else:
+ if self.needs_resize:
+ self.resize_cache()
+
+ # run update here as well, since io_type can't be set on creation
+ self.update_cache()
+
+ elif self.state == 'absent':
+ self.delete_cache()
+
+ # TODO: include other details about the storage pool (size, type, id, etc)
+ self.module.exit_json(changed=result['changed'], **self.resp)
+
+
+def main():
+ sp = NetAppESeriesFlashCache()
+ try:
+ sp.apply()
+ except Exception as e:
+ sp.debug("Exception in apply(): \n%s", to_native(e))
+ sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py
new file mode 100644
index 000000000..1284b2891
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_global
+short_description: NetApp E-Series manage global settings configuration
+description:
+ - Allow the user to configure several of the global settings associated with an E-Series storage-system
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - Set the name of the E-Series storage-system
+ - This label/name doesn't have to be unique.
+ - May be up to 30 characters in length.
+ type: str
+ aliases:
+ - label
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ required: no
+ type: str
+notes:
+ - Check mode is supported.
+ - This module requires Web Services API v1.3 or newer.
+"""
+
+EXAMPLES = """
+ - name: Set the storage-system name
+ netapp_e_global:
+ name: myArrayName
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+name:
+ description:
+ - The current name/label of the storage-system.
+ returned: on success
+ sample: myArrayName
+ type: str
+"""
+import json
+import logging
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class GlobalSettings(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=False, aliases=['label']),
+ log_path=dict(type='str', required=False),
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
+ args = self.module.params
+ self.name = args['name']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ self.check_mode = self.module.check_mode
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ if self.name and len(self.name) > 30:
+ self.module.fail_json(msg="The provided name is invalid, it must be < 30 characters in length.")
+
+ def get_name(self):
+ try:
+ (rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
+ if result['status'] in ['offline', 'neverContacted']:
+ self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
+ return result['name']
+ except Exception as err:
+ self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update_name(self):
+ name = self.get_name()
+ update = False
+ if self.name != name:
+ update = True
+
+ body = dict(name=self.name)
+
+ if update and not self.check_mode:
+ try:
+ (rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
+ data=json.dumps(body), headers=HEADERS, **self.creds)
+ self._logger.info("Set name to %s.", result['name'])
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(
+ msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+ return update
+
+ def update(self):
+ update = self.update_name()
+ name = self.get_name()
+
+ self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ settings = GlobalSettings()
+ settings()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py
new file mode 100644
index 000000000..699087f6c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_host
+short_description: NetApp E-Series manage hosts
+description: Create, update, remove hosts on NetApp E-series storage arrays
+version_added: '2.2'
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - If the host doesn't yet exist, the label/name to assign at creation time.
+ - If the host already exists, this will be used to uniquely identify the host to make any required changes
+ required: True
+ type: str
+ aliases:
+ - label
+ state:
+ description:
+ - Set to absent to remove an existing host
+ - Set to present to modify or create a new host definition
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ version_added: 2.7
+ host_type:
+ description:
+ - This is the type of host to be mapped
+ - Required when C(state=present)
+ - Either one of the following names can be specified (Linux DM-MP, VMWare, Windows, Windows Clustered) or a
+ host type index, which can be found in M(netapp_e_facts)
+ type: str
+ aliases:
+ - host_type_index
+ ports:
+ description:
+ - A list of host ports you wish to associate with the host.
+ - Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are
+ uniquely identified by a label and these must be unique.
+ required: False
+ type: list
+ suboptions:
+ type:
+ description:
+ - The interface type of the port to define.
+ - Acceptable choices depend on the capabilities of the target hardware/software platform.
+ required: true
+ choices:
+ - iscsi
+ - sas
+ - fc
+ - ib
+ - nvmeof
+ - ethernet
+ label:
+ description:
+ - A unique label to assign to this port assignment.
+ required: true
+ port:
+ description:
+ - The WWN or IQN of the hostPort to assign to this port definition.
+ required: true
+ force_port:
+ description:
+ - Allow ports that are already assigned to be re-assigned to your current host
+ required: false
+ type: bool
+ version_added: 2.7
+ group:
+ description:
+ - The unique identifier of the host-group you want the host to be a member of; this is used for clustering.
+ required: False
+ type: str
+ aliases:
+ - cluster
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ required: False
+ type: str
+ version_added: 2.7
+"""
+
+EXAMPLES = """
+ - name: Define or update an existing host named 'Host1'
+ netapp_e_host:
+ ssid: "1"
+ api_url: "10.113.1.101:8443"
+ api_username: admin
+ api_password: myPassword
+ name: "Host1"
+ state: present
+ host_type_index: Linux DM-MP
+ ports:
+ - type: 'iscsi'
+ label: 'PORT_1'
+ port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe'
+ - type: 'fc'
+ label: 'FC_1'
+ port: '10:00:FF:7C:FF:FF:FF:01'
+ - type: 'fc'
+ label: 'FC_2'
+ port: '10:00:FF:7C:FF:FF:FF:00'
+
+ - name: Ensure a host named 'Host2' doesn't exist
+ netapp_e_host:
+ ssid: "1"
+ api_url: "10.113.1.101:8443"
+ api_username: admin
+ api_password: myPassword
+ name: "Host2"
+ state: absent
+"""
+
+RETURN = """
+msg:
+ description:
+ - A user-readable description of the actions performed.
+ returned: on success
+ type: str
+ sample: The host has been created.
+id:
+ description:
+ - the unique identifier of the host on the E-Series storage-system
+ returned: on success when state=present
+ type: str
+ sample: 00000000600A098000AAC0C3003004700AD86A52
+ version_added: "2.6"
+
+ssid:
+ description:
+ - the unique identifier of the E-Series storage-system with the current api
+ returned: on success
+ type: str
+ sample: 1
+ version_added: "2.6"
+
+api_url:
+ description:
+ - the url of the API that this request was processed by
+ returned: on success
+ type: str
+ sample: https://webservices.example.com:8443
+ version_added: "2.6"
+"""
+import json
+import logging
+import re
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class Host(object):
+ HOST_TYPE_INDEXES = {"linux dm-mp": 28, "vmware": 10, "windows": 1, "windows clustered": 8}
+
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ group=dict(type='str', required=False, aliases=['cluster']),
+ ports=dict(type='list', required=False),
+ force_port=dict(type='bool', default=False),
+ name=dict(type='str', required=True, aliases=['label']),
+ host_type=dict(type='str', aliases=['host_type_index']),
+ log_path=dict(type='str', required=False),
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ self.check_mode = self.module.check_mode
+ args = self.module.params
+ self.group = args['group']
+ self.ports = args['ports']
+ self.force_port = args['force_port']
+ self.name = args['name']
+ self.state = args['state']
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.certs = args['validate_certs']
+
+ self.post_body = dict()
+ self.all_hosts = list()
+ self.host_obj = dict()
+ self.newPorts = list()
+ self.portsForUpdate = list()
+ self.portsForRemoval = list()
+
+ # Update host type with the corresponding index
+ host_type = args['host_type_index']
+ if host_type:
+ host_type = host_type.lower()
+ if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
+ self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
+ elif host_type.isdigit():
+ self.host_type_index = int(args['host_type_index'])
+ else:
+ self.module.fail_json(msg="host_type must be either a host type name or host type index found integer"
+ " the documentation.")
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+ if args['log_path']:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=args['log_path'], filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ # Ensure when state==present then host_type_index is defined
+ if self.state == "present" and self.host_type_index is None:
+ self.module.fail_json(msg="Host_type_index is required when state=='present'. Array Id: [%s]" % self.ssid)
+
+ # Fix port representation if they are provided with colons
+ if self.ports is not None:
+ for port in self.ports:
+ port['label'] = port['label'].lower()
+ port['type'] = port['type'].lower()
+ port['port'] = port['port'].lower()
+
+ # Determine whether address is 16-byte WWPN and, if so, remove
+ if re.match(r'^(0x)?[0-9a-f]{16}$', port['port'].replace(':', '')):
+ port['port'] = port['port'].replace(':', '').replace('0x', '')
+
+ def valid_host_type(self):
+ host_types = None
+ try:
+ (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ try:
+ match = list(filter(lambda host_type: host_type['index'] == self.host_type_index, host_types))[0]
+ return True
+ except IndexError:
+ self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
+
+ def assigned_host_ports(self, apply_unassigning=False):
+ """Determine if the hostPorts requested have already been assigned and return list of required used ports."""
+ used_host_ports = {}
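+ # Scan every defined host: requested ports already claimed by another host, or mislabeled on this host, either fail the task or,
+ # when force_port is set, are collected here so they can be unassigned.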
+ for host in self.all_hosts:
+ if host['label'] != self.name:
+ for host_port in host['hostSidePorts']:
+ for port in self.ports:
+ if port['port'] == host_port["address"] or port['label'] == host_port['label']:
+ if not self.force_port:
+ self.module.fail_json(msg="There are no host ports available OR there are not enough"
+ " unassigned host ports")
+ else:
+ # Determine port reference
+ port_ref = [port["hostPortRef"] for port in host["ports"]
+ if port["hostPortName"] == host_port["address"]]
+ port_ref.extend([port["initiatorRef"] for port in host["initiators"]
+ if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
+
+ # Create dictionary of hosts containing list of port references
+ if host["hostRef"] not in used_host_ports.keys():
+ used_host_ports.update({host["hostRef"]: port_ref})
+ else:
+ used_host_ports[host["hostRef"]].extend(port_ref)
+ else:
+ for host_port in host['hostSidePorts']:
+ for port in self.ports:
+ if ((host_port['label'] == port['label'] and host_port['address'] != port['port']) or
+ (host_port['label'] != port['label'] and host_port['address'] == port['port'])):
+ if not self.force_port:
+ self.module.fail_json(msg="There are no host ports available OR there are not enough"
+ " unassigned host ports")
+ else:
+ # Determine port reference
+ port_ref = [port["hostPortRef"] for port in host["ports"]
+ if port["hostPortName"] == host_port["address"]]
+ port_ref.extend([port["initiatorRef"] for port in host["initiators"]
+ if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
+
+ # Create dictionary of hosts containing list of port references
+ if host["hostRef"] not in used_host_ports.keys():
+ used_host_ports.update({host["hostRef"]: port_ref})
+ else:
+ used_host_ports[host["hostRef"]].extend(port_ref)
+
+ # Unassign assigned ports
+ if apply_unassigning:
+ for host_ref in used_host_ports.keys():
+ try:
+ rc, resp = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, host_ref),
+ url_username=self.user, url_password=self.pwd, headers=HEADERS,
+ validate_certs=self.certs, method='POST',
+ data=json.dumps({"portsToRemove": used_host_ports[host_ref]}))
+ except Exception as err:
+ self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]."
+ " Error [%s]." % (self.host_obj['id'], self.ssid,
+ used_host_ports[host_ref], to_native(err)))
+
+ return used_host_ports
+
+ def group_id(self):
+ if self.group:
+ try:
+ (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
+ url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ try:
+ group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0]
+ return group_obj['id']
+ except IndexError:
+ self.module.fail_json(msg="No group with the name: %s exists" % self.group)
+ else:
+ # Return the value equivalent of no group
+ return "0000000000000000000000000000000000000000"
+
+ def host_exists(self):
+ """Determine if the requested host exists
+ As a side effect, set the full list of defined hosts in 'all_hosts', and the target host in 'host_obj'.
+ """
+ match = False
+ all_hosts = list()
+
+ try:
+ (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
+ url_username=self.user, validate_certs=self.certs, headers=HEADERS)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ # Augment the host objects
+ for host in all_hosts:
+ for port in host['hostSidePorts']:
+ port['type'] = port['type'].lower()
+ port['address'] = port['address'].lower()
+ port['label'] = port['label'].lower()
+
+ # Augment hostSidePorts with their ID (this is an omission in the API)
+ ports = dict((port['label'], port['id']) for port in host['ports'])
+ ports.update((port['label'], port['id']) for port in host['initiators'])
+
+ for host_side_port in host['hostSidePorts']:
+ if host_side_port['label'] in ports:
+ host_side_port['id'] = ports[host_side_port['label']]
+
+ if host['label'] == self.name:
+ self.host_obj = host
+ match = True
+
+ self.all_hosts = all_hosts
+ return match
+
+ def needs_update(self):
+ """Determine whether we need to update the Host object
+ As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add
+ (newPorts), on self.
+ """
+ changed = False
+ if (self.host_obj["clusterRef"].lower() != self.group_id().lower() or
+ self.host_obj["hostTypeIndex"] != self.host_type_index):
+ self._logger.info("Either hostType or the clusterRef doesn't match, an update is required.")
+ changed = True
+ current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
+ for port in self.host_obj["hostSidePorts"])
+
+ if self.ports:
+ for port in self.ports:
+ for current_host_port_id in current_host_ports.keys():
+ if port == current_host_ports[current_host_port_id]:
+ current_host_ports.pop(current_host_port_id)
+ break
+ elif port["port"] == current_host_ports[current_host_port_id]["port"]:
+ if self.port_on_diff_host(port) and not self.force_port:
+ self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
+ " Specify force_port as True or try a different port spec" % port)
+
+ if (port["label"] != current_host_ports[current_host_port_id]["label"] or
+ port["type"] != current_host_ports[current_host_port_id]["type"]):
+ current_host_ports.pop(current_host_port_id)
+ self.portsForUpdate.append({"portRef": current_host_port_id, "port": port["port"],
+ "label": port["label"], "hostRef": self.host_obj["hostRef"]})
+ break
+ else:
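+                    # For-else: the loop found no matching current host-side port, so this requested port is brand new and must be added.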
+ self.newPorts.append(port)
+
+ self.portsForRemoval = list(current_host_ports.keys())
+ changed = any([self.newPorts, self.portsForUpdate, self.portsForRemoval, changed])
+
+ return changed
+
+ def port_on_diff_host(self, arg_port):
+ """ Checks to see if a passed in port arg is present on a different host """
+ for host in self.all_hosts:
+ # Only check 'other' hosts
+ if host['name'] != self.name:
+ for port in host['hostSidePorts']:
+ # Check if the port label is found in the port dict list of each host
+ if arg_port['label'] == port['label'] or arg_port['port'] == port['address']:
+ self.other_host = host
+ return True
+ return False
+
+ def update_host(self):
+ self._logger.info("Beginning the update for host=%s.", self.name)
+
+ if self.ports:
+
+ # Remove ports that need reassigning from their current host.
+ self.assigned_host_ports(apply_unassigning=True)
+
+ self.post_body["portsToUpdate"] = self.portsForUpdate
+ self.post_body["ports"] = self.newPorts
+ self._logger.info("Requested ports: %s", pformat(self.ports))
+ else:
+ self._logger.info("No host ports were defined.")
+
+ if self.group:
+ self.post_body['groupId'] = self.group_id()
+
+ self.post_body['hostType'] = dict(index=self.host_type_index)
+
+ api = self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id'])
+ self._logger.info("POST => url=%s, body=%s.", api, pformat(self.post_body))
+
+ if not self.check_mode:
+ try:
+ (rc, self.host_obj) = request(api, url_username=self.user, url_password=self.pwd, headers=HEADERS,
+ validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=True, **payload)
+
+ def create_host(self):
+ self._logger.info("Creating host definition.")
+
+ # Remove ports that need reassigning from their current host.
+ self.assigned_host_ports(apply_unassigning=True)
+
+ post_body = dict(
+ name=self.name,
+ hostType=dict(index=self.host_type_index),
+ groupId=self.group_id(),
+ )
+
+ if self.ports:
+ post_body.update(ports=self.ports)
+
+ api = self.url + "storage-systems/%s/hosts" % self.ssid
+ self._logger.info('POST => url=%s, body=%s', api, pformat(post_body))
+
+ if not self.check_mode:
+ if not self.host_exists():
+ try:
+ (rc, self.host_obj) = request(api, method='POST', url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ data=json.dumps(post_body), headers=HEADERS)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ else:
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload)
+
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=True, msg='Host created.', **payload)
+
+ def remove_host(self):
+ try:
+ (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
+ method='DELETE',
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
+ self.ssid,
+ to_native(err)))
+
+ def build_success_payload(self, host=None):
+ keys = ['id']
+ if host is not None:
+ result = dict((key, host[key]) for key in keys)
+ else:
+ result = dict()
+ result['ssid'] = self.ssid
+ result['api_url'] = self.url
+ return result
+
+ def apply(self):
+ if self.state == 'present':
+ if self.host_exists():
+ if self.needs_update() and self.valid_host_type():
+ self.update_host()
+ else:
+ payload = self.build_success_payload(self.host_obj)
+ self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload)
+ elif self.valid_host_type():
+ self.create_host()
+ else:
+ payload = self.build_success_payload()
+ if self.host_exists():
+ self.remove_host()
+ self.module.exit_json(changed=True, msg="Host removed.", **payload)
+ else:
+ self.module.exit_json(changed=False, msg="Host already absent.", **payload)
+
+
+def main():
+ host = Host()
+ host.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py
new file mode 100644
index 000000000..87676106f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {"metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community"}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_hostgroup
+version_added: "2.2"
+short_description: NetApp E-Series manage array host groups
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+description: Create, update or destroy host groups on a NetApp E-Series storage array.
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ required: true
+ description:
+ - Whether the specified host group should exist or not.
+ type: str
+ choices: ["present", "absent"]
+ name:
+ required: false
+ description:
+ - Name of the host group to manage
+ - This option is mutually exclusive with I(id).
+ type: str
+ new_name:
+ required: false
+ description:
+ - Specify this when you need to update the name of a host group
+ type: str
+ id:
+ required: false
+ description:
+ - Host reference identifier for the host group to manage.
+ - This option is mutually exclusive with I(name).
+ type: str
+ hosts:
+ required: false
+ description:
+ - List of host names/labels to add to the group
+ type: list
+"""
+EXAMPLES = """
+ - name: Configure Hostgroup
+ netapp_e_hostgroup:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
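+
+    # A hypothetical example (the group name and host labels are assumed); it illustrates the
+    # documented hosts option by associating existing hosts with the group.
+    - name: Configure Hostgroup with hosts
+      netapp_e_hostgroup:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        state: present
+        name: example_hostgroup
+        hosts:
+          - host01
+          - host02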
+"""
+RETURN = """
+clusterRef:
+ description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
+ will alter the volume access rights of other clusters, in addition to this one.
+ returned: always
+ type: bool
+ sample: false
+hosts:
+ description: A list of the hosts that are part of the host group after all operations.
+ returned: always except when state is absent
+ type: list
+ sample: ["HostA","HostB"]
+id:
+ description: The id number of the hostgroup
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
+ indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ returned: always except when state is absent
+ type: bool
+ sample: false
+label:
+ description: The user-assigned, descriptive label string for the cluster.
+ returned: always
+ type: str
+ sample: "MyHostGroup"
+name:
+ description: same as label
+ returned: always except when state is absent
+ type: str
+ sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+ description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent
+ type: bool
+ sample: true
+"""
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesHostGroup(NetAppESeriesModule):
+ EXPANSION_TIMEOUT_SEC = 10
+ DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(
+ state=dict(required=True, choices=["present", "absent"], type="str"),
+ name=dict(required=False, type="str"),
+ new_name=dict(required=False, type="str"),
+ id=dict(required=False, type="str"),
+ hosts=dict(required=False, type="list"))
+ mutually_exclusive = [["name", "id"]]
+ super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
+ web_services_version=version,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.name = args["name"]
+ self.new_name = args["new_name"]
+ self.id = args["id"]
+ self.hosts_list = args["hosts"]
+
+ self.current_host_group = None
+
+ @property
+ def hosts(self):
+ """Retrieve a list of host reference identifiers should be associated with the host group."""
+ host_list = []
+ existing_hosts = []
+
+ if self.hosts_list:
+ try:
+ rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ for host in self.hosts_list:
+ for existing_host in existing_hosts:
+ if host in existing_host["id"] or host in existing_host["name"]:
+ host_list.append(existing_host["id"])
+ break
+ else:
+ self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]."
+ % (self.ssid, host))
+
+ return host_list
+
+ @property
+ def host_groups(self):
+ """Retrieve a list of existing host groups."""
+ host_groups = []
+ hosts = []
+ try:
+ rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
+ rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
+ for group in host_groups:
+ hosts_ids = []
+ for host in hosts:
+ if group["id"] == host["clusterRef"]:
+ hosts_ids.append(host["hostRef"])
+ group.update({"hosts": hosts_ids})
+
+ return host_groups
+
+ @property
+ def current_hosts_in_host_group(self):
+ """Retrieve the current hosts associated with the current hostgroup."""
+ current_hosts = []
+ for group in self.host_groups:
+ if (self.name and group["name"] == self.name) or (self.id and group["id"] == self.id):
+ current_hosts = group["hosts"]
+
+ return current_hosts
+
+ def unassign_hosts(self, host_list=None):
+ """Unassign hosts from host group."""
+ if host_list is None:
+ host_list = self.current_host_group["hosts"]
+
+ for host_id in host_list:
+ try:
+ rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
+ method="POST", data={"group": "0000000000000000000000000000000000000000"})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
+ " Error[%s]." % (self.ssid, host_id, to_native(error)))
+
+ def delete_host_group(self, unassign_hosts=True):
+ """Delete host group"""
+ if unassign_hosts:
+ self.unassign_hosts()
+
+ try:
+ rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ def create_host_group(self):
+ """Create host group."""
+ data = {"name": self.name, "hosts": self.hosts}
+
+ response = None
+ try:
+ rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return response
+
+ def update_host_group(self):
+ """Update host group."""
+ data = {"name": self.new_name if self.new_name else self.name,
+ "hosts": self.hosts}
+
+ # unassign hosts that should not be part of the hostgroup
+ desired_host_ids = self.hosts
+ for host in self.current_hosts_in_host_group:
+ if host not in desired_host_ids:
+ self.unassign_hosts([host])
+
+ update_response = None
+ try:
+ rc, update_response = self.request("storage-systems/%s/host-groups/%s"
+ % (self.ssid, self.current_host_group["id"]), method="POST", data=data)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return update_response
+
+ def apply(self):
+ """Apply desired host group state to the storage array."""
+ changes_required = False
+
+ # Search for existing host group match
+ for group in self.host_groups:
+ if (self.id and group["id"] == self.id) or (self.name and group["name"] == self.name):
+ self.current_host_group = group
+
+ # Determine whether changes are required
+ if self.state == "present":
+ if self.current_host_group:
+ if (self.new_name and self.new_name != self.name) or self.hosts != self.current_host_group["hosts"]:
+ changes_required = True
+ else:
+ if not self.name:
+ self.module.fail_json(msg="The option name must be supplied when creating a new host group."
+ " Array id [%s]." % self.ssid)
+ changes_required = True
+
+ elif self.current_host_group:
+ changes_required = True
+
+ # Apply any necessary changes
+ msg = ""
+ if changes_required and not self.module.check_mode:
+ msg = "No changes required."
+ if self.state == "present":
+ if self.current_host_group:
+ if ((self.new_name and self.new_name != self.name) or
+ (self.hosts != self.current_host_group["hosts"])):
+ msg = self.update_host_group()
+ else:
+ msg = self.create_host_group()
+
+ elif self.current_host_group:
+ self.delete_host_group()
+ msg = "Host group deleted. Array Id [%s]. Host Name [%s]. Host Id [%s]."\
+ % (self.ssid, self.current_host_group["name"], self.current_host_group["id"])
+
+ self.module.exit_json(msg=msg, changed=changes_required)
+
+
+def main():
+ hostgroup = NetAppESeriesHostGroup()
+ hostgroup.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py
new file mode 100644
index 000000000..5e290f74e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_iscsi_interface
+short_description: NetApp E-Series manage iSCSI interface configuration
+description:
+ - Configure settings of an E-Series iSCSI interface
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ required: yes
+ type: str
+ choices:
+ - A
+ - B
+ name:
+ description:
+ - The channel of the port to modify the configuration of.
+ - The list of choices is not necessarily comprehensive. It depends on the number of ports
+ that are available in the system.
+ - The numerical value represents the number of the channel (typically from left to right on the HIC),
+ beginning with a value of 1.
+ required: yes
+ type: int
+ aliases:
+ - channel
+ state:
+ description:
+ - When enabled, the provided configuration will be utilized.
+ - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
+ choices:
+ - enabled
+ - disabled
+ default: enabled
+ type: str
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ config_method:
+ description:
+ - The configuration method type to use for this interface.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ choices:
+ - dhcp
+ - static
+ default: dhcp
+ type: str
+ mtu:
+ description:
+ - The maximum transmission units (MTU), in bytes.
+ - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+ (any value > 1500).
+ - Generally, it is necessary to have your host, switches, and other components not only support jumbo
+        frames, but also have them configured properly. Therefore, unless you know what you're doing, it's best to
+ leave this at the default.
+ default: 1500
+ type: int
+ aliases:
+ - max_frame_size
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc), can take seconds or minutes longer to take effect.
+ - This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ netapp_e_iscsi_interface:
+ name: "1"
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+ ssid: "1"
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ netapp_e_iscsi_interface:
+ name: "2"
+ controller: "B"
+ state: disabled
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+
+ - name: Enable jumbo frames for the first 4 ports on controller A
+ netapp_e_iscsi_interface:
+ name: "{{ item | int }}"
+ controller: "A"
+ state: enabled
+ mtu: 9000
+ config_method: dhcp
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ loop:
+ - 1
+ - 2
+ - 3
+ - 4
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+enabled:
+ description:
+ - Indicates whether IPv4 connectivity has been enabled or disabled.
+ - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
+ it is unlikely that the configuration will actually be valid.
+ returned: on success
+ sample: True
+ type: bool
+"""
+import json
+import logging
+from pprint import pformat
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class IscsiInterface(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ controller=dict(type='str', required=True, choices=['A', 'B']),
+ name=dict(type='int', aliases=['channel']),
+ state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
+ address=dict(type='str', required=False),
+ subnet_mask=dict(type='str', required=False),
+ gateway=dict(type='str', required=False),
+ config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
+ mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
+ log_path=dict(type='str', required=False),
+ ))
+
+ required_if = [
+ ["config_method", "static", ["address", "subnet_mask"]],
+ ]
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
+ args = self.module.params
+ self.controller = args['controller']
+ self.name = args['name']
+ self.mtu = args['mtu']
+ self.state = args['state']
+ self.address = args['address']
+ self.subnet_mask = args['subnet_mask']
+ self.gateway = args['gateway']
+ self.config_method = args['config_method']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ self.check_mode = self.module.check_mode
+ self.post_body = dict()
+ self.controllers = list()
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ if self.mtu < 1500 or self.mtu > 9000:
+ self.module.fail_json(msg="The provided mtu is invalid, it must be > 1500 and < 9000 bytes.")
+
+ if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
+ self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
+ ' subnet_mask, and gateway options.')
+
+ # A relatively primitive regex to validate that the input is formatted like a valid ip address
+ address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
+
+ if self.address and not address_regex.match(self.address):
+ self.module.fail_json(msg="An invalid ip address was provided for address.")
+
+ if self.subnet_mask and not address_regex.match(self.subnet_mask):
+ self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
+
+ if self.gateway and not address_regex.match(self.gateway):
+ self.module.fail_json(msg="An invalid ip address was provided for gateway.")
+
+ @property
+ def interfaces(self):
+ ifaces = list()
+ try:
+ (rc, ifaces) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
+ % self.ssid, headers=HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ # Filter out non-iSCSI interfaces
+ ifaces = [iface['iscsi'] for iface in ifaces if iface['interfaceType'] == 'iscsi']
+
+ return ifaces
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their references
+ {
+ 'A': '070000000000000000000001',
+ 'B': '070000000000000000000002',
+ }
+ :return: the controllers defined on the system
+ """
+ controllers = list()
+ try:
+ (rc, controllers) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id'
+ % self.ssid, headers=HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ controllers.sort()
+
+ controllers_dict = {}
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[label] = controller
+ i += 1
+
+ return controllers_dict
+
+ def fetch_target_interface(self):
+ interfaces = self.interfaces
+
+ for iface in interfaces:
+ if iface['channel'] == self.name and self.controllers[self.controller] == iface['controllerId']:
+ return iface
+
+ channels = sorted(set((str(iface['channel'])) for iface in interfaces
+ if self.controllers[self.controller] == iface['controllerId']))
+
+ self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s."
+ % (self.name, ", ".join(channels)))
+
+ def make_update_body(self, target_iface):
+ body = dict(iscsiInterface=target_iface['id'])
+ update_required = False
+
+ self._logger.info("Requested state=%s.", self.state)
+ self._logger.info("config_method: current=%s, requested=%s",
+ target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method)
+
+ if self.state == 'enabled':
+ settings = dict()
+ if not target_iface['ipv4Enabled']:
+ update_required = True
+ settings['ipv4Enabled'] = [True]
+ if self.mtu != target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']:
+ update_required = True
+ settings['maximumFramePayloadSize'] = [self.mtu]
+ if self.config_method == 'static':
+ ipv4Data = target_iface['ipv4Data']['ipv4AddressData']
+
+ if ipv4Data['ipv4Address'] != self.address:
+ update_required = True
+ settings['ipv4Address'] = [self.address]
+ if ipv4Data['ipv4SubnetMask'] != self.subnet_mask:
+ update_required = True
+ settings['ipv4SubnetMask'] = [self.subnet_mask]
+ if self.gateway is not None and ipv4Data['ipv4GatewayAddress'] != self.gateway:
+ update_required = True
+ settings['ipv4GatewayAddress'] = [self.gateway]
+
+ if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic':
+ update_required = True
+ settings['ipv4AddressConfigMethod'] = ['configStatic']
+
+ elif (target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp'):
+ update_required = True
+ settings.update(dict(ipv4Enabled=[True],
+ ipv4AddressConfigMethod=['configDhcp']))
+ body['settings'] = settings
+
+ else:
+ if target_iface['ipv4Enabled']:
+ update_required = True
+ body['settings'] = dict(ipv4Enabled=[False])
+
+ self._logger.info("Update required ?=%s", update_required)
+ self._logger.info("Update body: %s", pformat(body))
+
+ return update_required, body
+
+ def update(self):
+ self.controllers = self.get_controllers()
+ if self.controller not in self.controllers:
+ self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s."
+ % ", ".join(self.controllers.keys()))
+
+ iface_before = self.fetch_target_interface()
+ update_required, body = self.make_update_body(iface_before)
+ if update_required and not self.check_mode:
+ try:
+ url = (self.url +
+ 'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid)
+ (rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300,
+ ignore_errors=True, **self.creds)
+ # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
+ # is cancelled mid-flight), that it isn't worth the complexity.
+ if rc == 422 and result['retcode'] in ['busy', '3']:
+ self.module.fail_json(
+ msg="The interface is currently busy (probably processing a previously requested modification"
+ " request). This operation cannot currently be completed. Array Id [%s]. Error [%s]."
+ % (self.ssid, result))
+ # Handle authentication issues, etc.
+ elif rc != 200:
+ self.module.fail_json(
+ msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(result)))
+ self._logger.debug("Update request completed successfully.")
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(
+ msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ iface_after = self.fetch_target_interface()
+
+ self.module.exit_json(msg="The interface settings have been updated.", changed=update_required,
+ enabled=iface_after['ipv4Enabled'])
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ iface = IscsiInterface()
+ iface()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py
new file mode 100644
index 000000000..93b53b60c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_iscsi_target
+short_description: NetApp E-Series manage iSCSI target configuration
+description:
+ - Configure the settings of an E-Series iSCSI target
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - The name/alias to assign to the iSCSI target.
+ - This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
+ type: str
+ aliases:
+ - alias
+ ping:
+ description:
+ - Enable ICMP ping responses from the configured iSCSI ports.
+ type: bool
+ default: yes
+ chap_secret:
+ description:
+ - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
+ - When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
+ whether or not the password has changed.
+ - The chap secret may only use ascii characters with values between 32 and 126 decimal.
+ - The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
+ - The chap secret is cleared when not specified or an empty string.
+ type: str
+ aliases:
+ - chap
+ - password
+ unnamed_discovery:
+ description:
+      - When an initiator initiates a discovery session to a target port, it is considered an unnamed
+ discovery session if the iSCSI target iqn is not specified in the request.
+ - This option may be disabled to increase security if desired.
+ type: bool
+ default: yes
+ log_path:
+ description:
+ - A local path (on the Ansible controller), to a file to be used for debug logging.
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
+ M(netapp_e_iscsi_interface).
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
+ netapp_e_iscsi_target:
+ api_url: "https://localhost:8443/devmgr/v2"
+ api_username: admin
+ api_password: myPassword
+ ssid: "1"
+ validate_certs: no
+ name: myTarget
+ ping: yes
+ unnamed_discovery: yes
+
+ - name: Set the target alias and the CHAP secret
+ netapp_e_iscsi_target:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: myTarget
+ chap: password1234
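+
+  # An additional illustrative example (credential values are assumed): per the chap_secret
+  # description above, omitting chap_secret clears any previously configured CHAP secret.
+  - name: Clear the CHAP secret while keeping the target alias
+    netapp_e_iscsi_target:
+      ssid: "{{ ssid }}"
+      api_url: "{{ netapp_api_url }}"
+      api_username: "{{ netapp_api_username }}"
+      api_password: "{{ netapp_api_password }}"
+      name: myTarget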
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The iSCSI target settings have been updated.
+alias:
+ description:
+ - The alias assigned to the iSCSI target.
+ returned: on success
+ sample: myArray
+ type: str
+iqn:
+ description:
+ - The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
+ returned: on success
+ sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
+ type: str
+"""
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class IscsiTarget(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ name=dict(type='str', required=False, aliases=['alias']),
+ ping=dict(type='bool', required=False, default=True),
+ chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
+ unnamed_discovery=dict(type='bool', required=False, default=True),
+ log_path=dict(type='str', required=False),
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
+ args = self.module.params
+
+ self.name = args['name']
+ self.ping = args['ping']
+ self.chap_secret = args['chap_secret']
+ self.unnamed_discovery = args['unnamed_discovery']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ self.check_mode = self.module.check_mode
+ self.post_body = dict()
+ self.controllers = list()
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ if self.chap_secret:
+ if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
+ self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
+ " characters in length.")
+
+ for c in self.chap_secret:
+ ordinal = ord(c)
+ if ordinal < 32 or ordinal > 126:
+ self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
+ " characters with decimal values between 32 and 126.")
+
+ @property
+ def target(self):
+ """Provide information on the iSCSI Target configuration
+
+ Sample:
+ {
+ 'alias': 'myCustomName',
+ 'ping': True,
+ 'unnamed_discovery': True,
+ 'chap': False,
+ 'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
+ }
+ """
+ target = dict()
+ try:
+ (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
+ % self.ssid, headers=HEADERS, **self.creds)
+ # This likely isn't an iSCSI-enabled system
+ if not data:
+ self.module.fail_json(
+ msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))
+
+ data = data[0]
+
+ chap = any(
+ [auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])
+
+ target.update(dict(alias=data['alias']['iscsiAlias'],
+ iqn=data['nodeName']['iscsiNodeName'],
+ chap=chap))
+
+ (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
+ % self.ssid, headers=HEADERS, **self.creds)
+
+ data = data[0]
+ target.update(dict(ping=data['icmpPingResponseEnabled'],
+ unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))
+
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ return target
+
+ def apply_iscsi_settings(self):
+ """Update the iSCSI target alias and CHAP settings"""
+ update = False
+ target = self.target
+
+ body = dict()
+
+ if self.name is not None and self.name != target['alias']:
+ update = True
+ body['alias'] = self.name
+
+ # If the CHAP secret was provided, we trigger an update.
+ if self.chap_secret:
+ update = True
+ body.update(dict(enableChapAuthentication=True,
+ chapSecret=self.chap_secret))
+ # If no secret was provided, then we disable chap
+ elif target['chap']:
+ update = True
+ body.update(dict(enableChapAuthentication=False))
+
+ if update and not self.check_mode:
+ try:
+ request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
+ data=json.dumps(body), headers=HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ return update
+
+ def apply_target_changes(self):
+ update = False
+ target = self.target
+
+ body = dict()
+
+ if self.ping != target['ping']:
+ update = True
+ body['icmpPingResponseEnabled'] = self.ping
+
+ if self.unnamed_discovery != target['unnamed_discovery']:
+ update = True
+ body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery
+
+ self._logger.info(pformat(body))
+ if update and not self.check_mode:
+ try:
+ request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
+ data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+ return update
+
+ def update(self):
+ update = self.apply_iscsi_settings()
+ update = self.apply_target_changes() or update
+
+ target = self.target
+ data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
+
+ self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ iface = IscsiTarget()
+ iface()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py
new file mode 100644
index 000000000..e3bb61e60
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+ - Configure an E-Series system to allow authentication via an LDAP server
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ identifier:
+ description:
+ - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+ - If this is not specified, but I(state=present), we will utilize a default value of 'default'.
+ type: str
+ username:
+ description:
+ - This is the user account that will be used for querying the LDAP server.
+ - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+ required: yes
+ type: str
+ aliases:
+ - bind_username
+ password:
+ description:
+ - This is the password for the bind user account.
+ required: yes
+ type: str
+ aliases:
+ - bind_password
+ attributes:
+ description:
+ - The user attributes that should be considered for the group to role mapping.
+ - Typically this is used with something like 'memberOf', and a user's access is tested against group
+ membership or lack thereof.
+ default: memberOf
+ type: list
+ server:
+ description:
+ - This is the LDAP server url.
+ - The connection string should be specified as using the ldap or ldaps protocol along with the port
+ information.
+ aliases:
+ - server_url
+ required: yes
+ type: str
+ name:
+ description:
+      - The domain name[s] that will be used during authentication to identify which domain to use.
+      - Defaults to the DNS name of the I(server).
+ - The only requirement is that the name[s] be resolvable.
+ - "Example: user@example.com"
+ required: no
+ type: list
+ search_base:
+ description:
+ - The search base is used to find group memberships of the user.
+ - "Example: ou=users,dc=example,dc=com"
+ required: yes
+ type: str
+ role_mappings:
+ description:
+ - This is where you specify which groups should have access to what permissions for the
+ storage-system.
+ - For example, all users in group A will be assigned all 4 available roles, which will allow access
+ to all the management functionality of the system (super-user). Those in group B only have the
+ storage.monitor role, which will allow only read-only access.
+ - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+      - The roles that will be assigned to the group/groups matching the provided regex.
+ - storage.admin allows users full read/write access to storage objects and operations.
+ - storage.monitor allows users read-only access to storage objects and operations.
+ - support.admin allows users access to hardware, diagnostic information, the Major Event
+ Log, and other critical support-related functionality, but not the storage configuration.
+ - security.admin allows users access to authentication/authorization configuration, as well
+ as the audit log configuration, and certification management.
+ type: dict
+ required: yes
+ user_attribute:
+ description:
+ - This is the attribute we will use to match the provided username when a user attempts to
+ authenticate.
+ type: str
+ default: sAMAccountName
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ required: no
+ type: str
+notes:
+ - Check mode is supported.
+ - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+ authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+    different (or no) access to certain aspects of the system and API.
+ - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+ - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
+ the system for using LDAP authentication; every implementation is likely to be very different.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+ v3.0 and higher.
+'''
+
+EXAMPLES = '''
+ - name: Disable LDAP authentication
+ netapp_e_ldap:
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+ ssid: "1"
+ state: absent
+
+ - name: Remove the 'default' LDAP domain configuration
+ netapp_e_ldap:
+ state: absent
+ identifier: default
+
+ - name: Define a new LDAP domain, utilizing defaults where possible
+ netapp_e_ldap:
+ state: present
+ bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+ bind_password: "mySecretPass"
+ server: "ldap://example.com:389"
+ search_base: 'OU=Users,DC=example,DC=com'
+ role_mappings:
+ ".*dist-dev-storage.*":
+ - storage.admin
+ - security.admin
+ - support.admin
+ - storage.monitor
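+
+  # A hypothetical example (group names and identifier are assumed): one group receives all four
+  # documented roles while a second group is limited to read-only monitoring, as described under
+  # role_mappings.
+  - name: Define an LDAP domain with full-access and read-only role mappings
+    netapp_e_ldap:
+      state: present
+      identifier: corp
+      bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+      bind_password: "mySecretPass"
+      server: "ldap://example.com:389"
+      search_base: 'OU=Users,DC=example,DC=com'
+      role_mappings:
+        ".*storage-admins.*":
+          - storage.admin
+          - security.admin
+          - support.admin
+          - storage.monitor
+        ".*storage-monitors.*":
+          - storage.monitor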
+'''
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The ldap settings have been updated.
+"""
+
+import json
+import logging
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class Ldap(object):
+ NO_CHANGE_MSG = "No changes were necessary."
+
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']),
+ identifier=dict(type='str', required=False, ),
+ username=dict(type='str', required=False, aliases=['bind_username']),
+ password=dict(type='str', required=False, aliases=['bind_password'], no_log=True),
+ name=dict(type='list', required=False, ),
+ server=dict(type='str', required=False, aliases=['server_url']),
+ search_base=dict(type='str', required=False, ),
+ role_mappings=dict(type='dict', required=False, ),
+ user_attribute=dict(type='str', required=False, default='sAMAccountName'),
+ attributes=dict(type='list', default=['memberOf'], required=False, ),
+ log_path=dict(type='str', required=False),
+ ))
+
+ required_if = [
+ ["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]]
+ ]
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+ args = self.module.params
+ self.ldap = args['state'] == 'present'
+ self.identifier = args['identifier']
+ self.username = args['username']
+ self.password = args['password']
+ self.names = args['name']
+ self.server = args['server']
+ self.search_base = args['search_base']
+ self.role_mappings = args['role_mappings']
+ self.user_attribute = args['user_attribute']
+ self.attributes = args['attributes']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'],
+ timeout=60)
+
+ self.check_mode = self.module.check_mode
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ self.embedded = None
+ self.base_path = None
+
+ def make_configuration(self):
+ if not self.identifier:
+ self.identifier = 'default'
+
+ if not self.names:
+ parts = urlparse.urlparse(self.server)
+ netloc = parts.netloc
+ if ':' in netloc:
+ netloc = netloc.split(':')[0]
+ self.names = [netloc]
+
+ roles = list()
+ for regex in self.role_mappings:
+ for role in self.role_mappings[regex]:
+ roles.append(dict(groupRegex=regex,
+ ignoreCase=True,
+ name=role))
+
+ domain = dict(id=self.identifier,
+ ldapUrl=self.server,
+ bindLookupUser=dict(user=self.username, password=self.password),
+ roleMapCollection=roles,
+ groupAttributes=self.attributes,
+ names=self.names,
+ searchBase=self.search_base,
+ userAttribute=self.user_attribute,
+ )
+
+ return domain
+
+ def is_embedded(self):
+ """Determine whether or not we're using the embedded or proxy implementation of Web Services"""
+ if self.embedded is None:
+ url = self.url
+ try:
+ parts = urlparse.urlparse(url)
+ parts = parts._replace(path='/devmgr/utils/')
+ url = urlparse.urlunparse(parts)
+
+ (rc, result) = request(url + 'about', **self.creds)
+ self.embedded = not result['runningAsProxy']
+ except Exception as err:
+ self._logger.exception("Failed to retrieve the About information.")
+ self.module.fail_json(msg="Failed to determine the Web Services implementation type!"
+ " Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ return self.embedded
+
+ def get_full_configuration(self):
+ try:
+ (rc, result) = request(self.url + self.base_path, **self.creds)
+ return result
+ except Exception as err:
+ self._logger.exception("Failed to retrieve the LDAP configuration.")
+ self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def get_configuration(self, identifier):
+ try:
+ (rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
+ if rc == 200:
+ return result
+ elif rc == 404:
+ return None
+ else:
+ self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, result))
+ except Exception as err:
+ self._logger.exception("Failed to retrieve the LDAP configuration.")
+ self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update_configuration(self):
+ # Define a new domain based on the user input
+ domain = self.make_configuration()
+
+ # This is the current list of configurations
+ current = self.get_configuration(self.identifier)
+
+ update = current != domain
+ msg = "No changes were necessary for [%s]." % self.identifier
+ self._logger.info("Is updated: %s", update)
+ if update and not self.check_mode:
+ msg = "The configuration changes were made for [%s]." % self.identifier
+ try:
+ if current is None:
+ api = self.base_path + 'addDomain'
+ else:
+ api = self.base_path + '%s' % (domain['id'])
+
+ (rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
+ except Exception as err:
+ self._logger.exception("Failed to modify the LDAP configuration.")
+ self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ return msg, update
+
+ def clear_single_configuration(self, identifier=None):
+ if identifier is None:
+ identifier = self.identifier
+
+ configuration = self.get_configuration(identifier)
+ updated = False
+ msg = self.NO_CHANGE_MSG
+ if configuration:
+ updated = True
+ msg = "The LDAP domain configuration for [%s] was cleared." % identifier
+ if not self.check_mode:
+ try:
+ (rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+ return msg, updated
+
+ def clear_configuration(self):
+ configuration = self.get_full_configuration()
+ updated = False
+ msg = self.NO_CHANGE_MSG
+ if configuration['ldapDomains']:
+ updated = True
+ msg = "The LDAP configuration for all domains was cleared."
+ if not self.check_mode:
+ try:
+ (rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)
+
+                    # Older versions of the NetApp E-Series REST API do not provide an endpoint to remove all existing configs
+ if rc == 405:
+ for config in configuration['ldapDomains']:
+ self.clear_single_configuration(config['id'])
+
+ except Exception as err:
+ self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+ return msg, updated
+
+ def get_base_path(self):
+ embedded = self.is_embedded()
+ if embedded:
+ return 'storage-systems/%s/ldap/' % self.ssid
+ else:
+ return '/ldap/'
+
+ def update(self):
+ self.base_path = self.get_base_path()
+
+ if self.ldap:
+ msg, update = self.update_configuration()
+ elif self.identifier:
+ msg, update = self.clear_single_configuration()
+ else:
+ msg, update = self.clear_configuration()
+ self.module.exit_json(msg=msg, changed=update, )
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ settings = Ldap()
+ settings()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py
new file mode 100644
index 000000000..1b190ad32
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_lun_mapping
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+short_description: NetApp E-Series create, delete, or modify lun mappings
+description:
+  - Create, delete, or modify mappings between a volume and a targeted host or host group.
+version_added: "2.2"
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Present will ensure the mapping exists, absent will remove the mapping.
+ required: True
+ type: str
+ choices: ["present", "absent"]
+ target:
+ description:
+      - The name of the host or host group you wish to assign to the mapping.
+ - If omitted, the default hostgroup is used.
+ - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+ type: str
+ required: False
+ volume_name:
+ description:
+ - The name of the volume you wish to include in the mapping.
+ required: True
+ type: str
+ aliases:
+ - volume
+ lun:
+ description:
+ - The LUN value you wish to give the mapping.
+ - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+      - The LUN value will be determined by the storage-system when not specified.
+ version_added: 2.7
+ type: int
+ required: no
+ target_type:
+ description:
+      - This option specifies whether the target should be a host or a group of hosts.
+ - Only necessary when the target name is used for both a host and a group of hosts
+ choices:
+ - host
+ - group
+ version_added: 2.7
+ type: str
+ required: no
+'''
+
+EXAMPLES = '''
+---
+ - name: Map volume1 to the host target host1
+ netapp_e_lun_mapping:
+ ssid: 1
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: no
+ state: present
+ target: host1
+ volume: volume1
+ - name: Delete the lun mapping between volume1 and host1
+ netapp_e_lun_mapping:
+ ssid: 1
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: yes
+ state: absent
+ target: host1
+ volume: volume1
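+    # Illustrative only: the lun and target_type values below are placeholders, not taken from a real system.
+    - name: Map volume1 to host1 with an explicit LUN number
+      netapp_e_lun_mapping:
+        ssid: 1
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: no
+        state: present
+        target: host1
+        target_type: host
+        lun: 22
+        volume: volume1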
+'''
+RETURN = '''
+msg:
+ description: success of the module
+ returned: always
+ type: str
+ sample: Lun mapping is complete
+'''
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+}
+
+
+class LunMapping(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=["present", "absent"]),
+ target=dict(required=False, default=None),
+ volume_name=dict(required=True, aliases=["volume"]),
+ lun=dict(type="int", required=False),
+ target_type=dict(required=False, choices=["host", "group"])))
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ args = self.module.params
+
+ self.state = args["state"] in ["present"]
+ self.target = args["target"]
+ self.volume = args["volume_name"]
+ self.lun = args["lun"]
+ self.target_type = args["target_type"]
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.check_mode = self.module.check_mode
+ self.creds = dict(url_username=args["api_username"],
+ url_password=args["api_password"],
+ validate_certs=args["validate_certs"])
+ self.mapping_info = None
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ def update_mapping_info(self):
+ """Collect the current state of the storage array."""
+ response = None
+ try:
+ rc, response = request(self.url + "storage-systems/%s/graph" % self.ssid,
+ method="GET", headers=HEADERS, **self.creds)
+
+ except Exception as error:
+ self.module.fail_json(
+ msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
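+        # Only the host, cluster, volume, thin-volume and lunMapping sections of the graph are
+        # used to build the lookup tables below.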
+ # Create dictionary containing host/cluster references mapped to their names
+ target_reference = {}
+ target_name = {}
+ target_type = {}
+
+ if self.target_type is None or self.target_type == "host":
+ for host in response["storagePoolBundle"]["host"]:
+ target_reference.update({host["hostRef"]: host["name"]})
+ target_name.update({host["name"]: host["hostRef"]})
+ target_type.update({host["name"]: "host"})
+
+ if self.target_type is None or self.target_type == "group":
+ for cluster in response["storagePoolBundle"]["cluster"]:
+
+                # Verify there is no ambiguity in the target's type (i.e. a host and a host group share the same name)
+ if self.target and self.target_type is None and cluster["name"] == self.target and \
+ self.target in target_name.keys():
+ self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group"
+ " targets! Id [%s]" % self.ssid)
+
+ target_reference.update({cluster["clusterRef"]: cluster["name"]})
+ target_name.update({cluster["name"]: cluster["clusterRef"]})
+ target_type.update({cluster["name"]: "group"})
+
+ volume_reference = {}
+ volume_name = {}
+ lun_name = {}
+ for volume in response["volume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+ for volume in response["highLevelVolBundle"]["thinVolume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+
+ # Build current mapping object
+ self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"],
+ map_reference=mapping["mapRef"],
+ lun_mapping_reference=mapping["lunMappingRef"],
+ lun=mapping["lun"]
+ ) for mapping in response["storagePoolBundle"]["lunMapping"]],
+ volume_by_reference=volume_reference,
+ volume_by_name=volume_name,
+ lun_by_name=lun_name,
+ target_by_reference=target_reference,
+ target_by_name=target_name,
+ target_type_by_name=target_type)
+
+ def get_lun_mapping(self):
+ """Find the matching lun mapping reference.
+
+        Returns: tuple(bool, str, int): whether the target matches, the lun mapping reference, and the mapped lun
+ """
+ target_match = False
+ reference = None
+ lun = None
+
+ self.update_mapping_info()
+
+ # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with
+ # the specified volume (ie for an update)
+ if self.lun and any((self.lun == lun_mapping["lun"] and
+ self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and
+ self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]]
+ ) for lun_mapping in self.mapping_info["lun_mapping"]):
+ self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid)
+
+        # Verify that when target_type is specified it matches the target's actual type
+ if self.target and self.target_type and self.target in self.mapping_info["target_type_by_name"].keys() and \
+ self.mapping_info["target_type_by_name"][self.target] != self.target_type:
+ self.module.fail_json(
+ msg="Option target does not match the specified target_type! Id [%s]." % self.ssid)
+
+ # Verify volume and target exist if needed for expected state.
+ if self.state:
+ if self.volume not in self.mapping_info["volume_by_name"].keys():
+ self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
+ if self.target and self.target not in self.mapping_info["target_by_name"].keys():
+                self.module.fail_json(msg="Target does not exist. Id [%s]." % self.ssid)
+
+ for lun_mapping in self.mapping_info["lun_mapping"]:
+
+ # Find matching volume reference
+ if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
+ reference = lun_mapping["lun_mapping_reference"]
+ lun = lun_mapping["lun"]
+
+                # Determine whether the lun mapping is attached to the expected target (and lun, when specified)
+ if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
+ self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
+ (self.lun is None or lun == self.lun)):
+ target_match = True
+
+ return target_match, reference, lun
+
+ def update(self):
+        """Execute the required changes on the storage array."""
+ target_match, lun_reference, lun = self.get_lun_mapping()
+ update = (self.state and not target_match) or (not self.state and target_match)
+
+ if update and not self.check_mode:
+ try:
+ if self.state:
+ body = dict()
+ target = None if not self.target else self.mapping_info["target_by_name"][self.target]
+ if target:
+ body.update(dict(targetId=target))
+ if self.lun is not None:
+ body.update(dict(lun=self.lun))
+
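+                    # An existing mapping for the volume is moved to the requested target/LUN;
+                    # otherwise a brand new volume mapping is created.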
+ if lun_reference:
+
+ rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s/move"
+ % (self.ssid, lun_reference), method="POST", data=json.dumps(body),
+ headers=HEADERS, **self.creds)
+ else:
+ body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
+ rc, response = request(self.url + "storage-systems/%s/volume-mappings" % self.ssid,
+ method="POST", data=json.dumps(body), headers=HEADERS, **self.creds)
+
+ else: # Remove existing lun mapping for volume and target
+ rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s"
+ % (self.ssid, lun_reference),
+ method="DELETE", headers=HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(
+ msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]"
+ % (self.ssid, to_native(error)))
+
+ self.module.exit_json(msg="Lun mapping is complete.", changed=update)
+
+
+def main():
+ lun_mapping = LunMapping()
+ lun_mapping.update()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py
new file mode 100644
index 000000000..8a5e4f8e5
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py
@@ -0,0 +1,723 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_mgmt_interface
+short_description: NetApp E-Series management interface configuration
+description:
+ - Configure the E-Series management interfaces
+version_added: '2.7'
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable or disable IPv4 network interface configuration.
+            - Either IPv4 or IPv6 must be enabled, otherwise an error will occur.
+            - Only required when enabling or disabling the IPv4 network interface.
+ choices:
+ - enable
+ - disable
+ required: no
+ type: str
+ aliases:
+ - enable_interface
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are represented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ required: yes
+ type: str
+ choices:
+ - A
+ - B
+ name:
+ description:
+ - The port to modify the configuration for.
+ - The list of choices is not necessarily comprehensive. It depends on the number of ports
+ that are present in the system.
+ - The name represents the port number (typically from left to right on the controller),
+ beginning with a value of 1.
+ - Mutually exclusive with I(channel).
+ type: str
+ aliases:
+ - port
+ - iface
+ channel:
+ description:
+ - The port to modify the configuration for.
+ - The channel represents the port number (typically from left to right on the controller),
+ beginning with a value of 1.
+ - Mutually exclusive with I(name).
+ type: int
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: no
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: no
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: no
+ config_method:
+ description:
+ - The configuration method type to use for network interface ports.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: no
+ dns_config_method:
+ description:
+ - The configuration method type to use for DNS services.
+ - dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: no
+ dns_address:
+ description:
+ - Primary IPv4 DNS server address
+ type: str
+ required: no
+ dns_address_backup:
+ description:
+ - Backup IPv4 DNS server address
+ - Queried when primary DNS server fails
+ type: str
+ required: no
+ ntp_config_method:
+ description:
+ - The configuration method type to use for NTP services.
+ - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ choices:
+ - disable
+ - dhcp
+ - static
+ type: str
+ required: no
+ ntp_address:
+ description:
+ - Primary IPv4 NTP server address
+ type: str
+ required: no
+ ntp_address_backup:
+ description:
+ - Backup IPv4 NTP server address
+ - Queried when primary NTP server fails
+ required: no
+ type: str
+ ssh:
+ type: bool
+ description:
+ - Enable ssh access to the controller for debug purposes.
+ - This is a controller-level setting.
+ - rlogin/telnet will be enabled for ancient equipment where ssh is not available.
+ required: no
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+    - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+      via dhcp, etc.) can take seconds or minutes longer to take effect.
+    - "Known issue: Changes specifically to down ports will result in a failure. However, this may not be the case in
+      upcoming NetApp E-Series firmware releases (released after firmware version 11.40.2)."
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ netapp_e_mgmt_interface:
+ channel: 1
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+ ssid: "1"
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ netapp_e_mgmt_interface:
+ channel: 2
+ controller: "B"
+ enable_interface: no
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+
+ - name: Enable ssh access for ports one and two on controller A
+ netapp_e_mgmt_interface:
+ channel: "{{ item }}"
+ controller: "A"
+ ssh: yes
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ loop:
+ - 1
+ - 2
+
+ - name: Configure static DNS settings for the first port on controller A
+ netapp_e_mgmt_interface:
+ channel: 1
+ controller: "A"
+ dns_config_method: static
+ dns_address: "192.168.1.100"
+ dns_address_backup: "192.168.1.1"
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+
+ - name: Configure static NTP settings for ports one and two on controller B
+ netapp_e_mgmt_interface:
+ channel: "{{ item }}"
+ controller: "B"
+ ntp_config_method: static
+ ntp_address: "129.100.1.100"
+ ntp_address_backup: "127.100.1.1"
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ loop:
+ - 1
+ - 2
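+
+    # Illustrative only: addressing and DNS both acquired via DHCP on a single port.
+    - name: Configure the first port on controller B to use DHCP for both IPv4 and DNS
+      netapp_e_mgmt_interface:
+        channel: 1
+        controller: "B"
+        config_method: dhcp
+        dns_config_method: dhcp
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"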
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+enabled:
+ description:
+ - Indicates whether IPv4 connectivity has been enabled or disabled.
+        - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server present, for instance,
+          it is unlikely that the configuration will actually be valid.
+ returned: on success
+ sample: True
+ type: bool
+"""
+import json
+import logging
+from pprint import pformat, pprint
+import time
+import socket
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class MgmtInterface(object):
+ MAX_RETRIES = 15
+
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type="str", choices=["enable", "disable"],
+ aliases=["enable_interface"], required=False),
+ controller=dict(type="str", required=True, choices=["A", "B"]),
+ name=dict(type="str", aliases=["port", "iface"]),
+ channel=dict(type="int"),
+ address=dict(type="str", required=False),
+ subnet_mask=dict(type="str", required=False),
+ gateway=dict(type="str", required=False),
+ config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+ dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+ dns_address=dict(type="str", required=False),
+ dns_address_backup=dict(type="str", required=False),
+ ntp_config_method=dict(type="str", required=False, choices=["disable", "dhcp", "static"]),
+ ntp_address=dict(type="str", required=False),
+ ntp_address_backup=dict(type="str", required=False),
+ ssh=dict(type="bool", required=False),
+ log_path=dict(type="str", required=False),
+ ))
+
+ required_if = [
+ ["state", "enable", ["config_method"]],
+ ["config_method", "static", ["address", "subnet_mask"]],
+ ["dns_config_method", "static", ["dns_address"]],
+ ["ntp_config_method", "static", ["ntp_address"]],
+ ]
+
+ mutually_exclusive = [
+ ["name", "channel"],
+ ]
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive)
+ args = self.module.params
+
+ self.controller = args["controller"]
+ self.name = args["name"]
+ self.channel = args["channel"]
+
+ self.config_method = args["config_method"]
+ self.address = args["address"]
+ self.subnet_mask = args["subnet_mask"]
+ self.gateway = args["gateway"]
+ self.enable_interface = None if args["state"] is None else args["state"] == "enable"
+
+ self.dns_config_method = args["dns_config_method"]
+ self.dns_address = args["dns_address"]
+ self.dns_address_backup = args["dns_address_backup"]
+
+ self.ntp_config_method = args["ntp_config_method"]
+ self.ntp_address = args["ntp_address"]
+ self.ntp_address_backup = args["ntp_address_backup"]
+
+ self.ssh = args["ssh"]
+
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.creds = dict(url_password=args["api_password"],
+ validate_certs=args["validate_certs"],
+ url_username=args["api_username"], )
+
+ self.retries = 0
+
+ self.check_mode = self.module.check_mode
+ self.post_body = dict()
+
+ log_path = args["log_path"]
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ @property
+ def controllers(self):
+ """Retrieve a mapping of controller labels to their references
+ {
+ 'A': '070000000000000000000001',
+ 'B': '070000000000000000000002',
+ }
+ :return: the controllers defined on the system
+ """
+ try:
+ (rc, controllers) = request(self.url + 'storage-systems/%s/controllers'
+ % self.ssid, headers=HEADERS, **self.creds)
+ except Exception as err:
+ controllers = list()
+ self.module.fail_json(
+ msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ controllers.sort(key=lambda c: c['physicalLocation']['slot'])
+
+ controllers_dict = dict()
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ settings = dict(controllerSlot=controller['physicalLocation']['slot'],
+ controllerRef=controller['controllerRef'],
+ ssh=controller['networkSettings']['remoteAccessEnabled'])
+ controllers_dict[label] = settings
+ i += 1
+
+ return controllers_dict
+
+ @property
+ def interface(self):
+ net_interfaces = list()
+ try:
+ (rc, net_interfaces) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
+ % self.ssid, headers=HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ controllers = self.controllers
+ controller = controllers[self.controller]
+
+ net_interfaces = [iface for iface in net_interfaces if iface["controllerRef"] == controller["controllerRef"]]
+
+ # Find the correct interface
+ iface = None
+ for net in net_interfaces:
+ if self.name:
+ if net["alias"] == self.name or net["interfaceName"] == self.name:
+ iface = net
+ break
+ elif self.channel:
+ if net["channel"] == self.channel:
+ iface = net
+ break
+
+ if iface is None:
+ identifier = self.name if self.name is not None else self.channel
+ self.module.fail_json(msg="We could not find an interface matching [%s] on Array=[%s]."
+ % (identifier, self.ssid))
+
+ return dict(alias=iface["alias"],
+ channel=iface["channel"],
+ link_status=iface["linkStatus"],
+ enabled=iface["ipv4Enabled"],
+ address=iface["ipv4Address"],
+ gateway=iface["ipv4GatewayAddress"],
+ subnet_mask=iface["ipv4SubnetMask"],
+ dns_config_method=iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"],
+ dns_servers=iface["dnsProperties"]["acquisitionProperties"]["dnsServers"],
+ ntp_config_method=iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"],
+ ntp_servers=iface["ntpProperties"]["acquisitionProperties"]["ntpServers"],
+ config_method=iface["ipv4AddressConfigMethod"],
+ controllerRef=iface["controllerRef"],
+ controllerSlot=iface["controllerSlot"],
+ ipv6Enabled=iface["ipv6Enabled"],
+ id=iface["interfaceRef"], )
+
+ def get_enable_interface_settings(self, iface, expected_iface, update, body):
+ """Enable or disable the IPv4 network interface."""
+ if self.enable_interface:
+ if not iface["enabled"]:
+ update = True
+ body["ipv4Enabled"] = True
+ else:
+ if iface["enabled"]:
+ update = True
+ body["ipv4Enabled"] = False
+
+ expected_iface["enabled"] = body["ipv4Enabled"]
+ return update, expected_iface, body
+
+ def get_interface_settings(self, iface, expected_iface, update, body):
+ """Update network interface settings."""
+
+ if self.config_method == "dhcp":
+ if iface["config_method"] != "configDhcp":
+ update = True
+ body["ipv4AddressConfigMethod"] = "configDhcp"
+
+ else:
+ if iface["config_method"] != "configStatic":
+ update = True
+ body["ipv4AddressConfigMethod"] = "configStatic"
+
+ if iface["address"] != self.address:
+ update = True
+ body["ipv4Address"] = self.address
+
+ if iface["subnet_mask"] != self.subnet_mask:
+ update = True
+ body["ipv4SubnetMask"] = self.subnet_mask
+
+ if self.gateway and iface["gateway"] != self.gateway:
+ update = True
+ body["ipv4GatewayAddress"] = self.gateway
+
+ expected_iface["address"] = body["ipv4Address"]
+ expected_iface["subnet_mask"] = body["ipv4SubnetMask"]
+ expected_iface["gateway"] = body["ipv4GatewayAddress"]
+
+ expected_iface["config_method"] = body["ipv4AddressConfigMethod"]
+
+ return update, expected_iface, body
+
+ def get_dns_server_settings(self, iface, expected_iface, update, body):
+ """Add DNS server information to the request body."""
+ if self.dns_config_method == "dhcp":
+ if iface["dns_config_method"] != "dhcp":
+ update = True
+ body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="dhcp")
+
+ elif self.dns_config_method == "static":
+ dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)]
+ if self.dns_address_backup:
+ dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup))
+
+ body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="stat", dnsServers=dns_servers)
+
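+            # An update is required when either the acquisition type or the ordered list of
+            # static DNS servers differs from the requested primary/backup addresses.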
+ if (iface["dns_config_method"] != "stat" or
+ len(iface["dns_servers"]) != len(dns_servers) or
+ (len(iface["dns_servers"]) == 2 and
+ (iface["dns_servers"][0]["ipv4Address"] != self.dns_address or
+ iface["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or
+ (len(iface["dns_servers"]) == 1 and
+ iface["dns_servers"][0]["ipv4Address"] != self.dns_address)):
+ update = True
+
+ expected_iface["dns_servers"] = dns_servers
+
+ expected_iface["dns_config_method"] = body["dnsAcquisitionDescriptor"]["dnsAcquisitionType"]
+ return update, expected_iface, body
+
+ def get_ntp_server_settings(self, iface, expected_iface, update, body):
+ """Add NTP server information to the request body."""
+ if self.ntp_config_method == "disable":
+ if iface["ntp_config_method"] != "disabled":
+ update = True
+ body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="disabled")
+
+ elif self.ntp_config_method == "dhcp":
+ if iface["ntp_config_method"] != "dhcp":
+ update = True
+ body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="dhcp")
+
+ elif self.ntp_config_method == "static":
+ ntp_servers = [dict(addrType="ipvx", ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address))]
+ if self.ntp_address_backup:
+ ntp_servers.append(dict(addrType="ipvx",
+ ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address_backup)))
+
+ body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="stat", ntpServers=ntp_servers)
+
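+            # Same comparison as for DNS: the acquisition type and the ordered primary/backup
+            # NTP server list must both match, otherwise an update is issued.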
+ if (iface["ntp_config_method"] != "stat" or
+ len(iface["ntp_servers"]) != len(ntp_servers) or
+ ((len(iface["ntp_servers"]) == 2 and
+ (iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or
+ iface["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or
+ (len(iface["ntp_servers"]) == 1 and
+ iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address))):
+ update = True
+
+ expected_iface["ntp_servers"] = ntp_servers
+
+ expected_iface["ntp_config_method"] = body["ntpAcquisitionDescriptor"]["ntpAcquisitionType"]
+ return update, expected_iface, body
+
+ def get_remote_ssh_settings(self, settings, update, body):
+ """Configure network interface ports for remote ssh access."""
+ if self.ssh != settings["ssh"]:
+ update = True
+
+ body["enableRemoteAccess"] = self.ssh
+ return update, body
+
+ def update_array(self, settings, iface):
+ """Update controller with new interface, dns service, ntp service and/or remote ssh access information.
+
+ :returns: whether information passed will modify the controller's current state
+ :rtype: bool
+ """
+ update = False
+ body = dict(controllerRef=settings['controllerRef'],
+ interfaceRef=iface['id'])
+ expected_iface = iface.copy()
+
+        # Check whether the api url points at the management interface that is being changed
+ update_used_matching_address = False
+ if self.enable_interface and self.config_method:
+ netloc = list(urlparse.urlparse(self.url))[1]
+ address = netloc.split(":")[0]
+ address_info = socket.getaddrinfo(address, 8443)
+ url_address_info = socket.getaddrinfo(iface["address"], 8443)
+ update_used_matching_address = any(info in url_address_info for info in address_info)
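+            # When true, applying the change may drop the connection used for this request, so
+            # update_api_address_interface_match() handles the POST and falls back to the new address.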
+
+ self._logger.info("update_used_matching_address: %s", update_used_matching_address)
+
+ # Populate the body of the request and check for changes
+ if self.enable_interface is not None:
+ update, expected_iface, body = self.get_enable_interface_settings(iface, expected_iface, update, body)
+
+ if self.config_method is not None:
+ update, expected_iface, body = self.get_interface_settings(iface, expected_iface, update, body)
+
+ if self.dns_config_method is not None:
+ update, expected_iface, body = self.get_dns_server_settings(iface, expected_iface, update, body)
+
+ if self.ntp_config_method is not None:
+ update, expected_iface, body = self.get_ntp_server_settings(iface, expected_iface, update, body)
+
+ if self.ssh is not None:
+ update, body = self.get_remote_ssh_settings(settings, update, body)
+ iface["ssh"] = self.ssh
+ expected_iface["ssh"] = self.ssh
+
+ # debug information
+ self._logger.info(pformat(body))
+ self._logger.info(pformat(iface))
+ self._logger.info(pformat(expected_iface))
+
+ if self.check_mode:
+ return update
+
+ if update and not self.check_mode:
+ if not update_used_matching_address:
+ try:
+ (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
+ % self.ssid, method='POST', data=json.dumps(body), headers=HEADERS,
+ timeout=300, ignore_errors=True, **self.creds)
+ if rc == 422:
+ if data['retcode'] == "4" or data['retcode'] == "illegalParam":
+ if not (body['ipv4Enabled'] or iface['ipv6Enabled']):
+ self.module.fail_json(msg="This storage-system already has IPv6 connectivity disabled. "
+ "DHCP configuration for IPv4 is required at a minimum."
+ " Array Id [%s] Message [%s]."
+ % (self.ssid, data['errorMessage']))
+ else:
+ self.module.fail_json(msg="We failed to configure the management interface. Array Id "
+ "[%s] Message [%s]." % (self.ssid, data))
+ elif rc >= 300:
+ self.module.fail_json(
+ msg="We failed to configure the management interface. Array Id [%s] Message [%s]." %
+ (self.ssid, data))
+
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(
+ msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+ else:
+ self.update_api_address_interface_match(body)
+
+ return self.validate_changes(expected_iface) if update and iface["link_status"] != "up" else update
+
+ def update_api_address_interface_match(self, body):
+ """Change network interface address which matches the api_address"""
+ try:
+ try:
+ (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
+ use_proxy=False, force=True, ignore_errors=True, method='POST',
+ data=json.dumps(body), headers=HEADERS, timeout=10, **self.creds)
+ except Exception:
+ url_parts = list(urlparse.urlparse(self.url))
+ domain = url_parts[1].split(":")
+ domain[0] = self.address
+ url_parts[1] = ":".join(domain)
+ expected_url = urlparse.urlunparse(url_parts)
+ self._logger.info(pformat(expected_url))
+
+ (rc, data) = request(expected_url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
+ headers=HEADERS, timeout=300, **self.creds)
+ return
+ except Exception as err:
+ self._logger.info(type(err))
+ self.module.fail_json(
+ msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def validate_changes(self, expected_iface, retry=6):
+ """Validate interface changes were applied to the controller interface port. 30 second timeout"""
+ if self.interface != expected_iface:
+ time.sleep(5)
+ if retry:
+ return self.validate_changes(expected_iface, retry - 1)
+
+ self.module.fail_json(msg="Update failure: we failed to verify the necessary state change.")
+
+ return True
+
+ def check_health(self):
+ """It's possible, due to a previous operation, for the API to report a 424 (offline) status for the
+ storage-system. Therefore, we run a manual check with retries to attempt to contact the system before we
+ continue.
+ """
+ try:
+ (rc, data) = request(self.url + 'storage-systems/%s/controllers'
+ % self.ssid, headers=HEADERS,
+ ignore_errors=True, **self.creds)
+
+ # We've probably recently changed the interface settings and it's still coming back up: retry.
+ if rc == 424:
+ if self.retries < self.MAX_RETRIES:
+ self.retries += 1
+ self._logger.info("We hit a 424, retrying in 5s.")
+ time.sleep(5)
+ self.check_health()
+ else:
+ self.module.fail_json(
+ msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
+ (self.ssid, data))
+ elif rc >= 300:
+ self.module.fail_json(
+ msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
+ (self.ssid, data))
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ if self.retries < self.MAX_RETRIES:
+ self._logger.info("We hit a connection failure, retrying in 5s.")
+ self.retries += 1
+ time.sleep(5)
+ self.check_health()
+ else:
+ self.module.fail_json(
+ msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update(self):
+ """Update storage system with necessary changes."""
+ # Check if the storage array can be contacted
+ self.check_health()
+
+ # make the necessary changes to the storage system
+ settings = self.controllers[self.controller]
+ iface = self.interface
+ self._logger.info(pformat(settings))
+ self._logger.info(pformat(iface))
+ update = self.update_array(settings, iface)
+
+ self.module.exit_json(msg="The interface settings have been updated.", changed=update)
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ iface = MgmtInterface()
+ iface()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py
new file mode 100644
index 000000000..8bcee43fc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_group
+short_description: NetApp E-Series manage snapshot groups
+description:
+ - Create, update, delete snapshot groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ ssid:
+ description:
+ - Storage system identifier
+ type: str
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ state:
+ description:
+ - Whether to ensure the group is present or absent.
+ required: True
+ type: str
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - The name to give the snapshot group
+ type: str
+ required: True
+ base_volume_name:
+ description:
+ - The name of the base volume or thin volume to use as the base for the new snapshot group.
+            - If a snapshot group with an identical C(name) already exists but with a different base volume,
+              an error will be returned.
+ type: str
+ required: True
+ repo_pct:
+ description:
+ - The size of the repository in relation to the size of the base volume
+ required: False
+ type: int
+ default: 20
+ warning_threshold:
+ description:
+ - The repository utilization warning threshold, as a percentage of the repository volume capacity.
+ required: False
+ type: int
+ default: 80
+ delete_limit:
+ description:
+ - The automatic deletion indicator.
+ - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
+ snapshot images limited to the number specified.
+ - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+ required: False
+ type: int
+ default: 30
+ full_policy:
+ description:
+ - The behavior on when the data repository becomes full.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ required: False
+ default: purgepit
+ type: str
+ choices: ['unknown', 'failbasewrites', 'purgepit']
+ storage_pool_name:
+ required: True
+ description:
+ - The name of the storage pool on which to allocate the repository volume.
+ type: str
+ rollback_priority:
+ required: False
+ description:
+ - The importance of the rollback operation.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ choices: ['highest', 'high', 'medium', 'low', 'lowest']
+ type: str
+ default: medium
+"""
+
+EXAMPLES = """
+ - name: Configure Snapshot group
+ netapp_e_snapshot_group:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ base_volume_name: SSGroup_test
+        name: OOSS_Group
+        state: present
+ repo_pct: 20
+ warning_threshold: 85
+ delete_limit: 30
+ full_policy: purgepit
+ storage_pool_name: Disk_Pool_1
+ rollback_priority: medium
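+
+    # Illustrative only: removing the same group again afterwards.
+    - name: Remove Snapshot group
+      netapp_e_snapshot_group:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        state: absent
+        name: OOSS_Group
+        base_volume_name: SSGroup_test
+        storage_pool_name: Disk_Pool_1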
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: json facts for newly created snapshot group.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
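+# Thin wrapper around open_url(): returns a (status_code, data) tuple where data is the
+# JSON-decoded response body when one is present and, unless ignore_errors is set, raises
+# an Exception for HTTP status codes >= 400.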
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class SnapshotGroup(object):
+ def __init__(self):
+
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ base_volume_name=dict(required=True),
+ name=dict(required=True),
+ repo_pct=dict(default=20, type='int'),
+ warning_threshold=dict(default=80, type='int'),
+ delete_limit=dict(default=30, type='int'),
+ full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
+ rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
+ storage_pool_name=dict(type='str'),
+ ssid=dict(required=True),
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+
+ self.post_data = dict()
+ self.warning_threshold = self.module.params['warning_threshold']
+ self.base_volume_name = self.module.params['base_volume_name']
+ self.name = self.module.params['name']
+ self.repo_pct = self.module.params['repo_pct']
+ self.delete_limit = self.module.params['delete_limit']
+ self.full_policy = self.module.params['full_policy']
+ self.rollback_priority = self.module.params['rollback_priority']
+ self.storage_pool_name = self.module.params['storage_pool_name']
+ self.state = self.module.params['state']
+
+ self.url = self.module.params['api_url']
+ self.user = self.module.params['api_username']
+ self.pwd = self.module.params['api_password']
+ self.certs = self.module.params['validate_certs']
+ self.ssid = self.module.params['ssid']
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ self.changed = False
+
+ @property
+ def pool_id(self):
+ pools = 'storage-systems/%s/storage-pools' % self.ssid
+ url = self.url + pools
+ try:
+ (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
+ except Exception as err:
+ self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
+ "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ for pool in data:
+ if pool['name'] == self.storage_pool_name:
+ self.pool_data = pool
+ return pool['id']
+
+        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)
+
+ @property
+ def volume_id(self):
+ volumes = 'storage-systems/%s/volumes' % self.ssid
+ url = self.url + volumes
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+ except Exception as err:
+ self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
+ "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+        volume_id = None
+        qty = 0
+        for volume in data:
+            if volume['name'] == self.base_volume_name:
+                qty += 1
+                volume_id = volume['id']
+                self.volume = volume
+
+        if qty > 1:
+            self.module.fail_json(msg="More than one volume with the name: %s was found, "
+                                      "please ensure your volume has a unique name" % self.base_volume_name)
+
+        if volume_id is None:
+            self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
+
+        return volume_id
+
+ @property
+ def snapshot_group_id(self):
+ url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
+ try:
+ rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to fetch snapshot groups. " +
+ "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ for ssg in data:
+ if ssg['name'] == self.name:
+ self.ssg_data = ssg
+ return ssg['id']
+
+ return None
+
+ @property
+ def ssg_needs_update(self):
+ if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
+ self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
+ self.ssg_data['repFullPolicy'] != self.full_policy or \
+ self.ssg_data['rollbackPriority'] != self.rollback_priority:
+ return True
+ else:
+ return False
+
+ def create_snapshot_group(self):
+ self.post_data = dict(
+ baseMappableObjectId=self.volume_id,
+ name=self.name,
+ repositoryPercentage=self.repo_pct,
+ warningThreshold=self.warning_threshold,
+ autoDeleteLimit=self.delete_limit,
+ fullPolicy=self.full_policy,
+ storagePoolId=self.pool_id,
+ )
+ snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
+ url = self.url + snapshot
+ try:
+ rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to create snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ to_native(err)))
+
+ if not self.snapshot_group_id:
+ self.snapshot_group_id = self.ssg_data['id']
+
+ if self.ssg_needs_update:
+ self.update_ssg()
+ else:
+ self.module.exit_json(changed=True, **self.ssg_data)
+
+ def update_ssg(self):
+ self.post_data = dict(
+ warningThreshold=self.warning_threshold,
+ autoDeleteLimit=self.delete_limit,
+ fullPolicy=self.full_policy,
+ rollbackPriority=self.rollback_priority
+ )
+
+ url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
+ try:
+ rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ to_native(err)))
+
+ def apply(self):
+ if self.state == 'absent':
+ if self.snapshot_group_id:
+ try:
+ rc, resp = request(
+ self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
+ method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
+ validate_certs=self.certs)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to delete snapshot group. " +
+ "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
+ self.ssid,
+ to_native(err)))
+ self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
+ else:
+ self.module.exit_json(changed=False, msg="Snapshot group absent")
+
+ elif self.snapshot_group_id:
+ if self.ssg_needs_update:
+ self.update_ssg()
+ self.module.exit_json(changed=True, **self.ssg_data)
+ else:
+ self.module.exit_json(changed=False, **self.ssg_data)
+ else:
+ self.create_snapshot_group()
+
+
+def main():
+ vg = SnapshotGroup()
+ vg.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py
new file mode 100644
index 000000000..f0ea8fb66
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_images
+short_description: NetApp E-Series create and delete snapshot images
+description:
+ - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
+ - Only the oldest snapshot image can be deleted so consistency is preserved.
+ - "Related: Snapshot volumes are created from snapshot images."
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ ssid:
+ description:
+ - Storage system identifier
+ type: str
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ snapshot_group:
+ description:
+ - The name of the snapshot group in which you want to create a snapshot image.
+ required: True
+ type: str
+ state:
+ description:
+            - Whether a new snapshot image should be created or the oldest one deleted.
+ required: True
+ type: str
+ choices: ['create', 'remove']
+"""
+EXAMPLES = """
+ - name: Create Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+ state: 'create'
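+
+    # Illustrative only: 'remove' deletes the oldest snapshot image.
+    - name: Remove oldest snapshot image
+      netapp_e_snapshot_images:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ validate_certs }}"
+        snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+        state: 'remove'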
+"""
+RETURN = """
+---
+ msg:
+ description: State of operation
+ type: str
+ returned: always
+ sample: "Created snapshot image"
+ image_id:
+ description: ID of snapshot image
+ type: str
+ returned: state == created
+ sample: "3400000060080E5000299B640063074057BC5C5E "
+"""
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
+ snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
+ snap_groups_url = api_url + snap_groups
+ (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+
+ snapshot_group_id = None
+ for snapshot_group in snapshot_groups:
+ if name == snapshot_group['label']:
+ snapshot_group_id = snapshot_group['pitGroupRef']
+ break
+ if snapshot_group_id is None:
+ module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+ return snapshot_group
+
+
+def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
+ get_status = 'storage-systems/%s/snapshot-images' % ssid
+ url = api_url + get_status
+
+ try:
+ (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+ except Exception as err:
+ module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
+ (name, ssid, to_native(err)))
+ if not images:
+ module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))
+
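+    # Note: the snapshot-images endpoint returns every image on the array; the image with the
+    # lowest pitSequenceNumber overall is treated as the oldest here.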
+ oldest = min(images, key=lambda x: x['pitSequenceNumber'])
+ if oldest is None or "pitRef" not in oldest:
+ module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+ return oldest
+
+
+def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
+ snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
+ snapshot_group_id = snapshot_group_obj['pitGroupRef']
+ endpoint = 'storage-systems/%s/snapshot-images' % ssid
+ url = api_url + endpoint
+ post_data = json.dumps({'groupId': snapshot_group_id})
+
+ image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+
+ if image_data[1]['status'] == 'optimal':
+ status = True
+ id = image_data[1]['id']
+ else:
+ status = False
+ id = ''
+
+ return status, id
+
+
+def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
+ image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
+ image_id = image['pitRef']
+ endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
+ url = api_url + endpoint
+
+ try:
+ (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
+ validate_certs=module.params['validate_certs'])
+    except Exception as err:
+        ret = None
+        image_data = (None, {'errorMessage': to_native(err)})
+
+ if ret == 204:
+ deleted_status = True
+ error_message = ''
+ else:
+ deleted_status = False
+ error_message = image_data[1]['errorMessage']
+
+ return deleted_status, error_message
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ snapshot_group=dict(required=True, type='str'),
+ ssid=dict(required=True, type='str'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ validate_certs=dict(required=False, type='bool', default=True),
+ state=dict(required=True, choices=['create', 'remove'], type='str'),
+ ))
+ module = AnsibleModule(argument_spec)
+
+ p = module.params
+
+ ssid = p.pop('ssid')
+ api_url = p.pop('api_url')
+ user = p.pop('api_username')
+ pwd = p.pop('api_password')
+ snapshot_group = p.pop('snapshot_group')
+ desired_state = p.pop('state')
+
+ if not api_url.endswith('/'):
+ api_url += '/'
+
+ if desired_state == 'create':
+ created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
+
+ if created_status:
+ module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
+ else:
+ module.fail_json(
+ msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
+ else:
+ deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
+
+ if deleted:
+ module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
+ else:
+ module.fail_json(
+            msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % (
+ ssid, snapshot_group, error_msg))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py
new file mode 100644
index 000000000..0019d6f67
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_volume
+short_description: NetApp E-Series manage snapshot volumes.
+description:
+ - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+notes:
+  - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status
+    will be returned; no other changes can be made to a pre-existing snapshot volume.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ description:
+ - storage array ID
+ type: str
+ required: true
+ snapshot_image_id:
+ required: True
+ type: str
+ description:
+ - The identifier of the snapshot image used to create the new snapshot volume.
+ - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
+ full_threshold:
+ description:
+ - The repository utilization warning threshold percentage
+ default: 85
+ type: int
+ name:
+ required: True
+ description:
+ - The name you wish to give the snapshot volume
+ type: str
+ view_mode:
+ required: True
+ type: str
+ description:
+ - The snapshot volume access mode
+ choices: ['readOnly', 'readWrite', 'modeUnknown', '__Undefined']
+ default: 'readOnly'
+ repo_percentage:
+ description:
+ - The size of the view in relation to the size of the base volume
+ default: 20
+ type: int
+ storage_pool_name:
+ description:
+ - Name of the storage pool on which to allocate the repository volume.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or remove the snapshot volume
+ required: True
+ type: str
+ choices:
+ - absent
+ - present
+"""
+EXAMPLES = """
+ - name: Snapshot volume
+ netapp_e_snapshot_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}/"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ state: present
+ storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+ snapshot_image_id: "{{ snapshot_volume_image_id }}"
+ name: "{{ snapshot_volume_name }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: JSON facts for the volume that was created.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+class SnapshotVolume(object):
+ def __init__(self):
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=True),
+ snapshot_image_id=dict(type='str', required=True),
+ full_threshold=dict(type='int', default=85),
+ name=dict(type='str', required=True),
+ view_mode=dict(type='str', default='readOnly',
+ choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
+ repo_percentage=dict(type='int', default=20),
+ storage_pool_name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present'])
+ ))
+
+ self.module = AnsibleModule(argument_spec=argument_spec)
+ args = self.module.params
+ self.state = args['state']
+ self.ssid = args['ssid']
+ self.snapshot_image_id = args['snapshot_image_id']
+ self.full_threshold = args['full_threshold']
+ self.name = args['name']
+ self.view_mode = args['view_mode']
+ self.repo_percentage = args['repo_percentage']
+ self.storage_pool_name = args['storage_pool_name']
+ self.url = args['api_url']
+ self.user = args['api_username']
+ self.pwd = args['api_password']
+ self.certs = args['validate_certs']
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ @property
+ def pool_id(self):
+ pools = 'storage-systems/%s/storage-pools' % self.ssid
+ url = self.url + pools
+ (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ validate_certs=self.certs)
+
+ for pool in data:
+ if pool['name'] == self.storage_pool_name:
+ self.pool_data = pool
+ return pool['id']
+
+ self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name)
+
+ @property
+ def ss_vol_exists(self):
+ rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
+ url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+ if ss_vols:
+ for ss_vol in ss_vols:
+ if ss_vol['name'] == self.name:
+ self.ss_vol = ss_vol
+ return True
+ else:
+ return False
+
+ return False
+
+ @property
+ def ss_vol_needs_update(self):
+ if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
+ return True
+ else:
+ return False
+
+ def create_ss_vol(self):
+ post_data = dict(
+ snapshotImageId=self.snapshot_image_id,
+ fullThreshold=self.full_threshold,
+ name=self.name,
+ viewMode=self.view_mode,
+ repositoryPercentage=self.repo_percentage,
+ repositoryPoolId=self.pool_id
+ )
+
+ rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
+ data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
+ url_password=self.pwd, validate_certs=self.certs, method='POST')
+
+ self.ss_vol = create_resp
+ # Doing a check after creation because the creation call fails to set the specified warning threshold
+ if self.ss_vol_needs_update:
+ self.update_ss_vol()
+ else:
+ self.module.exit_json(changed=True, **create_resp)
+
+ def update_ss_vol(self):
+ post_data = dict(
+ fullThreshold=self.full_threshold,
+ )
+
+ rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
+ data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
+ method='POST', validate_certs=self.certs)
+
+ self.module.exit_json(changed=True, **resp)
+
+ def remove_ss_vol(self):
+ rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
+ headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+ method='DELETE')
+ self.module.exit_json(changed=True, msg="Volume successfully deleted")
+
+ def apply(self):
+ if self.state == 'present':
+ if self.ss_vol_exists:
+ if self.ss_vol_needs_update:
+ self.update_ss_vol()
+ else:
+ self.module.exit_json(changed=False, **self.ss_vol)
+ else:
+ self.create_ss_vol()
+ else:
+ if self.ss_vol_exists:
+ self.remove_ss_vol()
+ else:
+ self.module.exit_json(changed=False, msg="Volume already absent")
+
+
+def main():
+ sv = SnapshotVolume()
+ sv.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py
new file mode 100644
index 000000000..a0f0d005e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: netapp_e_storage_system
+version_added: "2.2"
+short_description: NetApp E-Series Web Services Proxy manage storage arrays
+description:
+- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+options:
+ api_username:
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ required: true
+ api_password:
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ required: true
+ api_url:
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - Should https certificates be validated?
+ type: bool
+ default: 'yes'
+ ssid:
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether the specified array should be configured on the Web Services Proxy or not.
+ required: true
+ type: str
+ choices: ['present', 'absent']
+ controller_addresses:
+ description:
+ - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the I(array_wwn) parameter.
+ type: list
+ required: true
+ array_wwn:
+ description:
+ - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the
+ I(controller_addresses) parameter.
+ type: str
+ array_password:
+ description:
+ - The management password of the array to manage, if set.
+ type: str
+ enable_trace:
+ description:
+ - Enable trace logging for SYMbol calls to the storage system.
+ type: bool
+ default: 'no'
+ meta_tags:
+ description:
+ - Optional meta tags to associate to this storage system
+ type: list
+ array_status_timeout_sec:
+ description:
+ - Array status timeout measured in seconds
+ default: 60
+ type: int
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = '''
+---
+ - name: Presence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ controller_addresses:
+ - "{{ item.value.address1 }}"
+ - "{{ item.value.address2 }}"
+ with_dict: "{{ storage_systems }}"
+ when: check_storage_system
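+
+ # A hedged additional example (not part of the original docs); it reuses the variables defined above.
+ # Removing a storage system from the Web Services Proxy:
+ - name: Absence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ with_dict: "{{ storage_systems }}"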
+'''
+
+RETURN = '''
+msg:
+ description: State of request
+ type: str
+ returned: always
+ sample: 'Storage system removed.'
+'''
+import json
+from datetime import datetime as dt, timedelta
+from time import sleep
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
+ (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
+ method='POST', url_username=api_usr, url_password=api_pwd,
+ validate_certs=validate_certs)
+ status = None
+ return_resp = resp
+ if 'status' in resp:
+ status = resp['status']
+
+ if rc == 201:
+ status = 'neverContacted'
+ fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
+
+ while status == 'neverContacted':
+ if dt.utcnow() > fail_after_time:
+ raise Exception("web proxy timed out waiting for array status")
+
+ sleep(1)
+ (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
+ headers=dict(Accept="application/json"), url_username=api_usr,
+ url_password=api_pwd, validate_certs=validate_certs,
+ ignore_errors=True)
+ status = system_resp['status']
+ return_resp = system_resp
+
+ return status, return_resp
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ ssid=dict(required=True, type='str'),
+ controller_addresses=dict(type='list'),
+ array_wwn=dict(required=False, type='str'),
+ array_password=dict(required=False, type='str', no_log=True),
+ array_status_timeout_sec=dict(default=60, type='int'),
+ enable_trace=dict(default=False, type='bool'),
+ meta_tags=dict(type='list')
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['controller_addresses', 'array_wwn']],
+ required_if=[('state', 'present', ['controller_addresses'])]
+ )
+
+ p = module.params
+
+ state = p['state']
+ ssid = p['ssid']
+ controller_addresses = p['controller_addresses']
+ array_wwn = p['array_wwn']
+ array_password = p['array_password']
+ array_status_timeout_sec = p['array_status_timeout_sec']
+ validate_certs = p['validate_certs']
+ meta_tags = p['meta_tags']
+ enable_trace = p['enable_trace']
+
+ api_usr = p['api_username']
+ api_pwd = p['api_password']
+ api_url = p['api_url']
+
+ changed = False
+ array_exists = False
+
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
+ url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
+ ignore_errors=True)
+ except Exception as err:
+ module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err)))
+
+ array_exists = True
+ array_detail = resp
+
+ if rc == 200:
+ if state == 'absent':
+ changed = True
+ array_exists = False
+ elif state == 'present':
+ current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
+ if set(controller_addresses) != current_addresses:
+ changed = True
+ if array_detail['wwn'] != array_wwn and array_wwn is not None:
+ module.fail_json(
+ msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
+ (ssid, array_detail['wwn'])
+ )
+ elif rc == 404:
+ if state == 'present':
+ changed = True
+ array_exists = False
+ else:
+ changed = False
+ module.exit_json(changed=changed, msg="Storage system was not present.")
+
+ if changed and not module.check_mode:
+ if state == 'present':
+ if not array_exists:
+ # add the array
+ array_add_req = dict(
+ id=ssid,
+ controllerAddresses=controller_addresses,
+ metaTags=meta_tags,
+ enableTrace=enable_trace
+ )
+
+ if array_wwn:
+ array_add_req['wwn'] = array_wwn
+
+ if array_password:
+ array_add_req['password'] = array_password
+
+ post_headers = dict(Accept="application/json")
+ post_headers['Content-Type'] = 'application/json'
+ request_data = json.dumps(array_add_req)
+
+ try:
+ (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
+ array_status_timeout_sec)
+ except Exception as err:
+ module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
+ (ssid, request_data, to_native(err)))
+
+ else: # array exists, modify...
+ post_headers = dict(Accept="application/json")
+ post_headers['Content-Type'] = 'application/json'
+ post_body = dict(
+ controllerAddresses=controller_addresses,
+ removeAllTags=True,
+ enableTrace=enable_trace,
+ metaTags=meta_tags
+ )
+
+ try:
+ (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
+ array_status_timeout_sec)
+ except Exception as err:
+ module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
+ (ssid, post_body, to_native(err)))
+
+ elif state == 'absent':
+ # delete the array
+ try:
+ (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
+ url_username=api_usr,
+ url_password=api_pwd, validate_certs=validate_certs)
+ except Exception as err:
+ module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err)))
+
+ if rc == 422:
+ module.exit_json(changed=changed, msg="Storage system was not presented.")
+ if rc == 204:
+ module.exit_json(changed=changed, msg="Storage system removed.")
+
+ module.exit_json(changed=changed, **resp)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py
new file mode 100644
index 000000000..5c74a415b
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py
@@ -0,0 +1,941 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {"metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community"}
+
+DOCUMENTATION = """
+---
+module: netapp_e_storagepool
+short_description: NetApp E-Series manage volume groups and disk pools
+description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
+version_added: '2.2'
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first.
+ required: true
+ type: str
+ choices: ["present", "absent"]
+ name:
+ description:
+ - The name of the storage pool to manage
+ type: str
+ required: true
+ criteria_drive_count:
+ description:
+ - The number of disks to use for building the storage pool.
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below)
+ required: false
+ type: int
+ criteria_min_usable_capacity:
+ description:
+ - The minimum size of the storage pool (in size_unit).
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this value exceeds its current size. (See expansion note below)
+ required: false
+ type: float
+ criteria_drive_type:
+ description:
+ - The type of disk (hdd or ssd) to use when searching for candidates to use.
+ - When not specified each drive type will be evaluated until successful drive candidates are found starting with
+ the most prevalent drive type.
+ required: false
+ type: str
+ choices: ["hdd","ssd"]
+ criteria_size_unit:
+ description:
+ - The unit used to interpret size parameters
+ choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
+ type: str
+ default: "gb"
+ criteria_drive_min_size:
+ description:
+ - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ type: float
+ criteria_drive_interface_type:
+ description:
+ - The interface type to use when selecting drives for the storage pool
+ - If not provided then all interface types will be considered.
+ choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
+ type: str
+ required: false
+ criteria_drive_require_da:
+ description:
+ - Ensures the storage pool will be created with only data assurance (DA) capable drives.
+ - Only available for new storage pools; existing storage pools cannot be converted.
+ default: false
+ type: bool
+ version_added: '2.9'
+ criteria_drive_require_fde:
+ description:
+ - Whether full disk encryption ability is required for drives to be added to the storage pool
+ default: false
+ type: bool
+ raid_level:
+ description:
+ - The RAID level of the storage pool to be created.
+ - Required only when I(state=="present").
+ - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
+ depending on the storage array specifications.
+ - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
+ - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required.
+ - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
+ - Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
+ required: false
+ choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
+ type: str
+ default: "raidDiskPool"
+ secure_pool:
+ description:
+ - Enables security at rest feature on the storage pool.
+ - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix)
+ - Warning: once security is enabled, it cannot be disabled without erasing the drives.
+ required: false
+ type: bool
+ reserve_drive_count:
+ description:
+ - Set the number of drives reserved by the storage pool for reconstruction operations.
+ - Only valid on raid disk pools.
+ type: int
+ required: false
+ remove_volumes:
+ description:
+ - Prior to removing a storage pool, delete all volumes in the pool.
+ default: true
+ type: bool
+ erase_secured_drives:
+ description:
+ - If I(state=="absent") then all storage pool drives will be erase
+ - If I(state=="present") then delete all available storage array drives that have security enabled.
+ default: true
+ type: bool
+notes:
+ - The expansion operations are non-blocking due to the time-consuming nature of expanding volume groups.
+ - Expansions of traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each
+ required step will be attempted until the request fails, which is likely because of the required expansion time.
+ - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
+ - Tray loss protection and drawer loss protection will be chosen if at all possible.
+"""
+EXAMPLES = """
+- name: No disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: JSON facts for the pool that was created.
+"""
+import functools
+from itertools import groupby
+from time import sleep
+from pprint import pformat
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+def get_most_common_elements(iterator):
+ """Returns a generator containing a descending list of most common elements."""
+ if not isinstance(iterator, list):
+ raise TypeError("iterator must be a list.")
+
+ grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))]
+ return sorted(grouped, key=lambda x: x[1], reverse=True)
+
+
+def memoize(func):
+ """Generic memoizer for any function with any number of arguments including zero."""
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ class MemoizeFuncArgs(dict):
+ def __missing__(self, _key):
+ self[_key] = func(*args, **kwargs)
+ return self[_key]
+
+ key = str((args, kwargs)) if args and kwargs else "no_argument_response"
+ return MemoizeFuncArgs().__getitem__(key)
+
+ return wrapper
+
+
+class NetAppESeriesStoragePool(NetAppESeriesModule):
+ EXPANSION_TIMEOUT_SEC = 10
+ DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(
+ state=dict(required=True, choices=["present", "absent"], type="str"),
+ name=dict(required=True, type="str"),
+ criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
+ default="gb", type="str"),
+ criteria_drive_count=dict(type="int"),
+ criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"],
+ type="str"),
+ criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False),
+ criteria_drive_min_size=dict(type="float"),
+ criteria_drive_require_da=dict(type="bool", required=False),
+ criteria_drive_require_fde=dict(type="bool", required=False),
+ criteria_min_usable_capacity=dict(type="float"),
+ raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"],
+ default="raidDiskPool"),
+ erase_secured_drives=dict(type="bool", default=True),
+ secure_pool=dict(type="bool", default=False),
+ reserve_drive_count=dict(type="int"),
+ remove_volumes=dict(type="bool", default=True))
+
+ required_if = [["state", "present", ["raid_level"]]]
+ super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options,
+ web_services_version=version,
+ supports_check_mode=True,
+ required_if=required_if)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.ssid = args["ssid"]
+ self.name = args["name"]
+ self.criteria_drive_count = args["criteria_drive_count"]
+ self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"]
+ self.criteria_size_unit = args["criteria_size_unit"]
+ self.criteria_drive_min_size = args["criteria_drive_min_size"]
+ self.criteria_drive_type = args["criteria_drive_type"]
+ self.criteria_drive_interface_type = args["criteria_drive_interface_type"]
+ self.criteria_drive_require_fde = args["criteria_drive_require_fde"]
+ self.criteria_drive_require_da = args["criteria_drive_require_da"]
+ self.raid_level = args["raid_level"]
+ self.erase_secured_drives = args["erase_secured_drives"]
+ self.secure_pool = args["secure_pool"]
+ self.reserve_drive_count = args["reserve_drive_count"]
+ self.remove_volumes = args["remove_volumes"]
+ self.pool_detail = None
+
+ # Change all sizes to be measured in bytes
+ if self.criteria_min_usable_capacity:
+ self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity *
+ self.SIZE_UNIT_MAP[self.criteria_size_unit])
+ if self.criteria_drive_min_size:
+ self.criteria_drive_min_size = int(self.criteria_drive_min_size *
+ self.SIZE_UNIT_MAP[self.criteria_size_unit])
+ self.criteria_size_unit = "bytes"
+
+ # Adjust unused raid level option to reflect documentation
+ if self.raid_level == "raidAll":
+ self.raid_level = "raidDiskPool"
+ if self.raid_level == "raid3":
+ self.raid_level = "raid5"
+
+ @property
+ @memoize
+ def available_drives(self):
+ """Determine the list of available drives"""
+ return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"]
+
+ @property
+ @memoize
+ def available_drive_types(self):
+ """Determine the types of available drives sorted by the most common first."""
+ types = [drive["driveMediaType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(types)]
+
+ @property
+ @memoize
+ def available_drive_interface_types(self):
+ """Determine the types of available drives."""
+ interfaces = [drive["phyDriveType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(interfaces)]
+
+ @property
+ def storage_pool_drives(self):
+ """Retrieve list of drives found in storage pool."""
+ return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
+
+ @property
+ def expandable_drive_count(self):
+ """Maximum number of drives that a storage pool can be expanded at a given time."""
+ capabilities = None
+ if self.raid_level == "raidDiskPool":
+ return len(self.available_drives)
+
+ try:
+ rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return capabilities["featureParameters"]["maxDCEDrives"]
+
+ @property
+ def disk_pool_drive_minimum(self):
+ """Provide the storage array's minimum disk pool drive count."""
+ rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)
+
+ # Standard minimum is 11 drives but some arrays allow 10 drives. 11 will be the default when no value is reported.
+ if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
+ attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
+ return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
+
+ return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+ def get_available_drive_capacities(self, drive_id_list=None):
+ """Determine the list of available drive capacities."""
+ if drive_id_list:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["id"] in drive_id_list and drive["available"] and
+ drive["status"] == "optimal"])
+ else:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["available"] and drive["status"] == "optimal"])
+
+ self.module.log("available drive capacities: %s" % available_drive_capacities)
+ return list(available_drive_capacities)
+
+ @property
+ def drives(self):
+ """Retrieve list of drives found in storage pool."""
+ drives = None
+ try:
+ rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return drives
+
+ def is_drive_count_valid(self, drive_count):
+ """Validate drive count criteria is met."""
+ if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+ return False
+
+ if self.raid_level == "raidDiskPool":
+ return drive_count >= self.disk_pool_drive_minimum
+ if self.raid_level == "raid0":
+ return drive_count > 0
+ if self.raid_level == "raid1":
+ return drive_count >= 2 and (drive_count % 2) == 0
+ if self.raid_level in ["raid3", "raid5"]:
+ return 3 <= drive_count <= 30
+ if self.raid_level == "raid6":
+ return 5 <= drive_count <= 30
+ return False
+
+ @property
+ def storage_pool(self):
+ """Retrieve storage pool information."""
+ storage_pools_resp = None
+ try:
+ rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+ return pool_detail[0] if pool_detail else dict()
+
+ @property
+ def storage_pool_volumes(self):
+ """Retrieve list of volumes associated with storage pool."""
+ volumes_resp = None
+ try:
+ rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ group_ref = self.storage_pool["volumeGroupRef"]
+ storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
+ return storage_pool_volume_list
+
+ def get_ddp_capacity(self, expansion_drive_list):
+ """Return the total usable capacity based on the additional drives."""
+
+ def get_ddp_error_percent(_drive_count, _extent_count):
+ """Determine the space reserved for reconstruction"""
+ if _drive_count <= 36:
+ if _extent_count <= 600:
+ return 0.40
+ elif _extent_count <= 1400:
+ return 0.35
+ elif _extent_count <= 6200:
+ return 0.20
+ elif _extent_count <= 50000:
+ return 0.15
+ elif _drive_count <= 64:
+ if _extent_count <= 600:
+ return 0.20
+ elif _extent_count <= 1400:
+ return 0.15
+ elif _extent_count <= 6200:
+ return 0.10
+ elif _extent_count <= 50000:
+ return 0.05
+ elif _drive_count <= 480:
+ if _extent_count <= 600:
+ return 0.20
+ elif _extent_count <= 1400:
+ return 0.15
+ elif _extent_count <= 6200:
+ return 0.10
+ elif _extent_count <= 50000:
+ return 0.05
+
+ self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)
+
+ def get_ddp_reserved_drive_count(_disk_count):
+ """Determine the number of reserved drive."""
+ reserve_count = 0
+
+ if self.reserve_drive_count:
+ reserve_count = self.reserve_drive_count
+ elif _disk_count >= 256:
+ reserve_count = 8
+ elif _disk_count >= 192:
+ reserve_count = 7
+ elif _disk_count >= 128:
+ reserve_count = 6
+ elif _disk_count >= 64:
+ reserve_count = 4
+ elif _disk_count >= 32:
+ reserve_count = 3
+ elif _disk_count >= 12:
+ reserve_count = 2
+ elif _disk_count == 11:
+ reserve_count = 1
+
+ return reserve_count
+
+ if self.pool_detail:
+ drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
+ else:
+ drive_count = len(expansion_drive_list)
+
+ drive_usable_capacity = min(min(self.get_available_drive_capacities()),
+ min(self.get_available_drive_capacities(expansion_drive_list)))
+ drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
+ maximum_stripe_count = (drive_count * drive_data_extents) / 10
+
+ error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
+ error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10
+
+ total_stripe_count = maximum_stripe_count - error_overhead
+ stripe_count_per_drive = total_stripe_count / drive_count
+ reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
+ available_stripe_count = total_stripe_count - reserved_stripe_count
+
+ return available_stripe_count * 4294967296
+
+ @memoize
+ def get_candidate_drives(self):
+ """Retrieve set of drives candidates for creating a new storage pool."""
+
+ def get_candidate_drive_request():
+ """Perform request for new volume creation."""
+ candidates_list = list()
+ drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
+ interface_types = [self.criteria_drive_interface_type] \
+ if self.criteria_drive_interface_type else self.available_drive_interface_types
+
+ for interface_type in interface_types:
+ for drive_type in drive_types:
+ candidates = None
+ volume_candidate_request_data = dict(
+ type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
+ diskPoolVolumeCandidateRequestData=dict(
+ reconstructionReservedDriveCount=65535))
+ candidate_selection_type = dict(
+ candidateSelectionType="count",
+ driveRefList=dict(driveRef=self.available_drives))
+ criteria = dict(raidLevel=self.raid_level,
+ phyDriveType=interface_type,
+ dssPreallocEnabled=False,
+ securityType="capable" if self.criteria_drive_require_fde else "none",
+ driveMediaType=drive_type,
+ onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
+ volumeCandidateRequestData=volume_candidate_request_data,
+ allocateReserveSpace=False,
+ securityLevel="fde" if self.criteria_drive_require_fde else "none",
+ candidateSelectionType=candidate_selection_type)
+
+ try:
+ rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
+ "Response=true" % self.ssid, data=criteria, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ if candidates:
+ candidates_list.extend(candidates["volumeCandidate"])
+
+ # Sort output based on tray and then drawer protection first
+ tray_drawer_protection = list()
+ tray_protection = list()
+ drawer_protection = list()
+ no_protection = list()
+ sorted_candidates = list()
+ for item in candidates_list:
+ if item["trayLossProtection"]:
+ if item["drawerLossProtection"]:
+ tray_drawer_protection.append(item)
+ else:
+ tray_protection.append(item)
+ elif item["drawerLossProtection"]:
+ drawer_protection.append(item)
+ else:
+ no_protection.append(item)
+
+ if tray_drawer_protection:
+ sorted_candidates.extend(tray_drawer_protection)
+ if tray_protection:
+ sorted_candidates.extend(tray_protection)
+ if drawer_protection:
+ sorted_candidates.extend(drawer_protection)
+ if no_protection:
+ sorted_candidates.extend(no_protection)
+
+ return sorted_candidates
+
+ # Determine the appropriate candidate list
+ for candidate in get_candidate_drive_request():
+
+ # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
+ if self.criteria_drive_count:
+ if self.criteria_drive_count != int(candidate["driveCount"]):
+ continue
+ if self.criteria_min_usable_capacity:
+ if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
+ self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
+ self.criteria_min_usable_capacity > int(candidate["usableSize"])):
+ continue
+ if self.criteria_drive_min_size:
+ if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
+ continue
+
+ return candidate
+
+ self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
+ @memoize
+ def get_expansion_candidate_drives(self):
+ """Retrieve required expansion drive list.
+
+ Note: To satisfy the expansion criteria each item in the candidate list must be added to the specified group since there
+ is a potential limitation on how many drives can be incorporated at a time.
+ * Traditional raid volume groups can be expanded by at most two drives at a time. There is no limit for raid disk pools.
+
+ :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
+ """
+
+ def get_expansion_candidate_drive_request():
+ """Perform the request for expanding existing volume groups or disk pools.
+
+ Note: the list of candidate structures does not necessarily produce candidates that meet all criteria.
+ """
+ candidates_list = None
+ url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid
+ if self.raid_level == "raidDiskPool":
+ url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid
+
+ try:
+ rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"])
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return candidates_list["candidates"]
+
+ required_candidate_list = list()
+ required_additional_drives = 0
+ required_additional_capacity = 0
+ total_required_capacity = 0
+
+ # Determine whether and how much expansion is needed to satisfy the specified criteria.
+ if self.criteria_min_usable_capacity:
+ total_required_capacity = self.criteria_min_usable_capacity
+ required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])
+
+ if self.criteria_drive_count:
+ required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)
+
+ # Determine the appropriate expansion candidate list
+ if required_additional_drives > 0 or required_additional_capacity > 0:
+ for candidate in get_expansion_candidate_drive_request():
+
+ if self.criteria_drive_min_size:
+ if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
+ continue
+
+ if self.raid_level == "raidDiskPool":
+ if (len(candidate["drives"]) >= required_additional_drives and
+ self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
+ required_candidate_list.append(candidate)
+ break
+ else:
+ required_additional_drives -= len(candidate["drives"])
+ required_additional_capacity -= int(candidate["usableCapacity"])
+ required_candidate_list.append(candidate)
+
+ # Determine if required drives and capacities are satisfied
+ if required_additional_drives <= 0 and required_additional_capacity <= 0:
+ break
+ else:
+ self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
+ return required_candidate_list
+
+ def get_reserve_drive_count(self):
+ """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool)."""
+
+ if not self.pool_detail:
+ self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)
+
+ if self.raid_level != "raidDiskPool":
+ self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+ % (self.pool_detail["id"], self.ssid))
+
+ return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]
+
+ def get_maximum_reserve_drive_count(self):
+ """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool)."""
+ if self.raid_level != "raidDiskPool":
+ self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+ % (self.pool_detail["id"], self.ssid))
+
+ drives_ids = list()
+
+ if self.pool_detail:
+ drives_ids.extend(self.storage_pool_drives)
+ for candidate in self.get_expansion_candidate_drives():
+ drives_ids.extend((candidate["drives"]))
+ else:
+ candidate = self.get_candidate_drives()
+ drives_ids.extend(candidate["driveRefList"]["driveRef"])
+
+ drive_count = len(drives_ids)
+ maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
+ if maximum_reserve_drive_count > 10:
+ maximum_reserve_drive_count = 10
+
+ return maximum_reserve_drive_count
+
+ def set_reserve_drive_count(self, check_mode=False):
+ """Set the reserve drive count for raidDiskPool."""
+ changed = False
+
+ if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
+ maximum_count = self.get_maximum_reserve_drive_count()
+
+ if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
+ self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
+ "Note that it may be necessary to wait for expansion operations to complete "
+ "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]."
+ % (maximum_count, self.ssid))
+
+ if self.reserve_drive_count != self.get_reserve_drive_count():
+ changed = True
+
+ if not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
+ method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
+ newDriveCount=self.reserve_drive_count))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
+ " Array [%s]." % (self.pool_detail["id"], self.ssid))
+
+ return changed
+
+ def erase_all_available_secured_drives(self, check_mode=False):
+ """Erase all available drives that have encryption at rest feature enabled."""
+ changed = False
+ drives_list = list()
+ for drive in self.drives:
+ if drive["available"] and drive["fdeEnabled"]:
+ changed = True
+ drives_list.append(drive["id"])
+
+ if drives_list and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+ % self.ssid, method="POST", data=dict(driveRef=drives_list))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid)
+
+ return changed
+
+ def create_storage_pool(self):
+ """Create new storage pool."""
+ url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid
+ request_body = dict(label=self.name,
+ candidate=self.get_candidate_drives())
+
+ if self.raid_level == "raidDiskPool":
+ url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid
+
+ request_body.update(
+ dict(backgroundOperationPriority="useDefault",
+ criticalReconstructPriority="useDefault",
+ degradedReconstructPriority="useDefault",
+ poolUtilizationCriticalThreshold=65535,
+ poolUtilizationWarningThreshold=0))
+
+ if self.reserve_drive_count:
+ request_body.update(dict(volumeCandidateData=dict(
+ diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count))))
+
+ try:
+ rc, resp = self.request(url, method="POST", data=request_body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ # Update drive and storage pool information
+ self.pool_detail = self.storage_pool
+
+ def delete_storage_pool(self):
+ """Delete storage pool."""
+ storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]]
+ try:
+ delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else ""
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s%s"
+ % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]."
+ % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ if storage_pool_drives and self.erase_secured_drives:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+ % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]."
+ " Error [%s]." % (self.ssid, to_native(error)))
+
+ def secure_storage_pool(self, check_mode=False):
+ """Enable security on an existing storage pool"""
+ self.pool_detail = self.storage_pool
+ needs_secure_pool = False
+
+ if not self.secure_pool and self.pool_detail["securityType"] == "enabled":
+ self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.")
+ if self.secure_pool and self.pool_detail["securityType"] != "enabled":
+ needs_secure_pool = True
+
+ if needs_secure_pool and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]),
+ data=dict(securePool=True), method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error"
+ " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_secure_pool
+
+ def migrate_raid_level(self, check_mode=False):
+ """Request storage pool raid level migration."""
+ needs_migration = self.raid_level != self.pool_detail["raidLevel"]
+ if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool":
+ self.module.fail_json(msg="Raid level cannot be changed for disk pools")
+
+ if needs_migration and not check_mode:
+ sp_raid_migrate_req = dict(raidLevel=self.raid_level)
+
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration"
+ % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]."
+ " Error[%s]." % (self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_migration
+
+ def expand_storage_pool(self, check_mode=False):
+ """Add drives to existing storage pool.
+
+ :return bool: whether drives were required to be added to satisfy the specified criteria."""
+ expansion_candidate_list = self.get_expansion_candidate_drives()
+ changed_required = bool(expansion_candidate_list)
+ estimated_completion_time = 0.0
+
+ # build expandable groupings of traditional raid candidate
+ required_expansion_candidate_list = list()
+ while expansion_candidate_list:
+ subset = list()
+ while expansion_candidate_list and len(subset) < self.expandable_drive_count:
+ subset.extend(expansion_candidate_list.pop()["drives"])
+ required_expansion_candidate_list.append(subset)
+
+ if required_expansion_candidate_list and not check_mode:
+ url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
+ if self.raid_level == "raidDiskPool":
+ url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid
+
+ while required_expansion_candidate_list:
+ candidate_drives_list = required_expansion_candidate_list.pop()
+ request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
+ driveRef=candidate_drives_list)
+ try:
+ rc, resp = self.request(url, method="POST", data=request_body)
+ except Exception as error:
+ rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
+ % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
+ if rc == 200 and actions_resp:
+ actions = [action["currentAction"] for action in actions_resp
+ if action["volumeRef"] in self.storage_pool_volumes]
+ self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
+ " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]."
+ % (", ".join(actions), self.pool_detail["id"], self.ssid,
+ to_native(error)))
+
+ self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]."
+ " Error[%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ # Wait for expansion completion unless it is the last request in the candidate list
+ if required_expansion_candidate_list:
+ for dummy in range(self.EXPANSION_TIMEOUT_SEC):
+ rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
+ % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
+ if rc == 200:
+ for action in actions_resp:
+ if (action["volumeRef"] in self.storage_pool_volumes and
+ action["currentAction"] == "remappingDce"):
+ sleep(1)
+ estimated_completion_time = action["estimatedTimeToCompletion"]
+ break
+ else:
+ estimated_completion_time = 0.0
+ break
+
+ return changed_required, estimated_completion_time
+
+ def apply(self):
+ """Apply requested state to storage array."""
+ changed = False
+
+ if self.state == "present":
+ if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None:
+ self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be"
+ " specified.")
+ if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count):
+ self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.")
+
+ self.pool_detail = self.storage_pool
+ self.module.log(pformat(self.pool_detail))
+
+ if self.state == "present" and self.erase_secured_drives:
+ self.erase_all_available_secured_drives(check_mode=True)
+
+ # Determine whether changes need to be applied to the storage array
+ if self.pool_detail:
+
+ if self.state == "absent":
+ changed = True
+
+ elif self.state == "present":
+
+ if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives):
+ self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]."
+ % (self.ssid, self.pool_detail["id"]))
+
+ if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]:
+ self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type."
+ " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"]))
+
+ if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da !=
+ self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]):
+ self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]."
+ " Pool [%s]." % (self.ssid, self.pool_detail["id"]))
+
+ # Evaluate current storage pool for required change.
+ needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True)
+ if needs_expansion:
+ changed = True
+ if self.migrate_raid_level(check_mode=True):
+ changed = True
+ if self.secure_storage_pool(check_mode=True):
+ changed = True
+ if self.set_reserve_drive_count(check_mode=True):
+ changed = True
+
+ elif self.state == "present":
+ changed = True
+
+ # Apply changes to storage array
+ msg = "No changes were required for the storage pool [%s]."
+ if changed and not self.module.check_mode:
+ if self.state == "present":
+ if self.erase_secured_drives:
+ self.erase_all_available_secured_drives()
+
+ if self.pool_detail:
+ change_list = list()
+
+ # Expansion needs to occur before raid level migration to account for any sizing needs.
+ expanded, estimated_completion_time = self.expand_storage_pool()
+ if expanded:
+ change_list.append("expanded")
+ if self.migrate_raid_level():
+ change_list.append("raid migration")
+ if self.secure_storage_pool():
+ change_list.append("secured")
+ if self.set_reserve_drive_count():
+ change_list.append("adjusted reserve drive count")
+
+ if change_list:
+ msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)
+
+ if expanded:
+ msg += "\nThe expansion operation will complete in an estimated %s minutes."\
+ % estimated_completion_time
+ else:
+ self.create_storage_pool()
+ msg = "Storage pool [%s] was created."
+
+ if self.secure_storage_pool():
+ msg = "Storage pool [%s] was created and secured."
+ if self.set_reserve_drive_count():
+ msg += " Adjusted reserve drive count."
+
+ elif self.pool_detail:
+ self.delete_storage_pool()
+ msg = "Storage pool [%s] removed."
+
+ self.pool_detail = self.storage_pool
+ self.module.log(pformat(self.pool_detail))
+ self.module.log(msg % self.name)
+ self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
+
+
+def main():
+ storage_pool = NetAppESeriesStoragePool()
+ storage_pool.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py
new file mode 100644
index 000000000..1e6e85886
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_syslog
+short_description: NetApp E-Series manage syslog settings
+description:
+ - Allow the syslog settings to be configured for an individual E-Series storage-system
+version_added: '2.7'
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Add or remove the syslog server configuration for E-Series storage array.
+ - Existing syslog server configuration will be removed or updated when its address matches I(address).
+ - A fully qualified hostname that resolves to an IPv4 address matching I(address) will not be
+ treated as a match.
+ choices:
+ - present
+ - absent
+ type: str
+ default: present
+ address:
+ description:
+ - The syslog server's IPv4 address or a fully qualified hostname.
+ - All existing syslog configurations will be removed when I(state=absent) and I(address=None).
+ type: str
+ port:
+ description:
+ - This is the port the syslog server is using.
+ default: 514
+ type: int
+ protocol:
+ description:
+ - This is the transmission protocol the syslog server uses to receive syslog messages.
+ choices:
+ - udp
+ - tcp
+ - tls
+ default: udp
+ type: str
+ components:
+ description:
+ - The E-Series logging components define the specific logs to transfer to the syslog server.
+ - At the time of writing, 'auditLog' is the only logging component but more may become available.
+ default: ["auditLog"]
+ type: list
+ test:
+ description:
+ - This forces a test syslog message to be sent to the stated syslog server.
+ - Only attempts transmission when I(state=present).
+ type: bool
+ default: no
+ log_path:
+ description:
+ - This argument specifies a local path for logging purposes.
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+ SANtricity OS 11.40.2) and higher.
+"""
+
+EXAMPLES = """
+ - name: Add two syslog server configurations to NetApp E-Series storage array.
+ netapp_e_syslog:
+ state: present
+ address: "{{ item }}"
+ port: 514
+ protocol: tcp
+ component: "auditLog"
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+ loop:
+ - "192.168.1.1"
+ - "192.168.1.100"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+syslog:
+ description:
+ - True if syslog server configuration has been added to e-series storage array.
+ returned: on success
+ sample: True
+ type: bool
+"""
+
+import json
+import logging
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+class Syslog(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(choices=["present", "absent"], required=False, default="present"),
+ address=dict(type="str", required=False),
+ port=dict(type="int", default=514, required=False),
+ protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
+ components=dict(type="list", required=False, default=["auditLog"]),
+ test=dict(type="bool", default=False, require=False),
+ log_path=dict(type="str", required=False),
+ ))
+
+ required_if = [
+ ["state", "present", ["address", "port", "protocol", "components"]],
+ ]
+
+ mutually_exclusive = [
+ ["test", "absent"],
+ ]
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if,
+ mutually_exclusive=mutually_exclusive)
+ args = self.module.params
+
+ self.syslog = args["state"] in ["present"]
+ self.address = args["address"]
+ self.port = args["port"]
+ self.protocol = args["protocol"]
+ self.components = args["components"]
+ self.test = args["test"]
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.creds = dict(url_password=args["api_password"],
+ validate_certs=args["validate_certs"],
+ url_username=args["api_username"], )
+
+ self.components.sort()
+
+ self.check_mode = self.module.check_mode
+
+ # logging setup
+ log_path = args["log_path"]
+ self._logger = logging.getLogger(self.__class__.__name__)
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ def get_configuration(self):
+ """Retrieve existing syslog configuration."""
+ try:
+ (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
+ headers=HEADERS, **self.creds)
+ return result
+ except Exception as err:
+ self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def test_configuration(self, body):
+ """Send test syslog message to the storage array.
+
+ Allows a fixed number of retries to occur before a failure is issued, giving the storage array time to create
+ the new syslog server record.
+ """
+ try:
+ (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]),
+ method='POST', headers=HEADERS, **self.creds)
+ except Exception as err:
+ self.module.fail_json(
+ msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err)))
+
+ def update_configuration(self):
+ """Post the syslog request to array."""
+ config_match = None
+ perfect_match = None
+ update = False
+ body = dict()
+
+ # search existing configuration for syslog server entry match
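+ # config_match: an entry whose address matches; perfect_match: an entry whose port, protocol, and components also match (no update required).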
+ configs = self.get_configuration()
+ if self.address:
+ for config in configs:
+ if config["serverAddress"] == self.address:
+ config_match = config
+ if (config["port"] == self.port and config["protocol"] == self.protocol and
+ len(config["components"]) == len(self.components) and
+ all([component["type"] in self.components for component in config["components"]])):
+ perfect_match = config_match
+ break
+
+ # generate body for the http request
+ if self.syslog:
+ if not perfect_match:
+ update = True
+ if config_match:
+ body.update(dict(id=config_match["id"]))
+ components = [dict(type=component_type) for component_type in self.components]
+ body.update(dict(serverAddress=self.address, port=self.port,
+ protocol=self.protocol, components=components))
+ self._logger.info(body)
+ self.make_configuration_request(body)
+
+ # remove specific syslog server configuration
+ elif self.address:
+ update = True
+ body.update(dict(id=config_match["id"]))
+ self._logger.info(body)
+ self.make_configuration_request(body)
+
+ # if no address is specified, remove all syslog server configurations
+ elif configs:
+ update = True
+ for config in configs:
+ body.update(dict(id=config["id"]))
+ self._logger.info(body)
+ self.make_configuration_request(body)
+
+ return update
+
+ def make_configuration_request(self, body):
+ # make http request(s)
+ if not self.check_mode:
+ try:
+ if self.syslog:
+ if "id" in body:
+ (rc, result) = request(
+ self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
+ method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
+ else:
+ (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
+ method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
+ body.update(result)
+
+ # send syslog test message
+ if self.test:
+ self.test_configuration(body)
+
+ elif "id" in body:
+ (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
+ method='DELETE', headers=HEADERS, **self.creds)
+
+ # This is going to catch cases like a connection failure
+ except Exception as err:
+ self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(err)))
+
+ def update(self):
+ """Update configuration and respond to ansible."""
+ update = self.update_configuration()
+ self.module.exit_json(msg="The syslog settings have been updated.", changed=update)
+
+ def __call__(self, *args, **kwargs):
+ self.update()
+
+
+def main():
+ settings = Syslog()
+ settings()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py
new file mode 100644
index 000000000..dd388e612
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py
@@ -0,0 +1,868 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume
+version_added: "2.2"
+short_description: NetApp E-Series manage storage volumes (standard and thin)
+description:
+ - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Whether the specified volume should exist
+ required: true
+ type: str
+ choices: ['present', 'absent']
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+ storage_pool_name:
+ description:
+ - Required only when I(state=='present').
+ - Name of the storage pool wherein the volume should reside.
+ type: str
+ required: false
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'gb'
+ size:
+ description:
+ - Required only when I(state=='present').
+ - Size of the volume in I(size_unit).
+ - Size of the virtual volume in the case of a thin volume in I(size_unit).
+ - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may
+ exist.
+ type: float
+ required: true
+ segment_size_kb:
+ description:
+ - Segment size of the volume
+ - All values are in kibibytes.
+ - Some common choices include '8', '16', '32', '64', '128', '256', and '512' but options are system
+ dependent.
+ - Retrieve the definitive system list from M(netapp_e_facts) under segment_sizes.
+ - When the storage pool is a raidDiskPool then the segment size must be 128kb.
+ - Segment size migrations are not allowed in this module
+ type: int
+ default: 128
+ thin_provision:
+ description:
+ - Whether the volume should be thin provisioned.
+ - Thin volumes can only be created when I(raid_level=="raidDiskPool").
+ - Generally, use of thin-provisioning is not recommended due to performance impacts.
+ type: bool
+ default: false
+ thin_volume_repo_size:
+ description:
+ - This value (in size_unit) sets the allocated space for the thin provisioned repository.
+ - Initial value must be between 4gb and 256gb (inclusive) in increments of 4gb.
+ - During expansion operations the increase must be between 4gb and 256gb (inclusive) in increments of 4gb.
+ - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
+ - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+ type: int
+ required: false
+ thin_volume_max_repo_size:
+ description:
+ - This is the maximum amount the thin volume repository will be allowed to grow.
+ - Only has significance when I(thin_volume_expansion_policy=="automatic").
+ - When I(thin_volume_repo_size), as a percentage of I(thin_volume_max_repo_size), exceeds
+ I(thin_volume_growth_alert_threshold), a warning will be issued and the storage array will execute
+ the I(thin_volume_expansion_policy) policy.
+ - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
+ repository size.
+ - The default will be the same as size (in size_unit)
+ type: float
+ thin_volume_expansion_policy:
+ description:
+ - This is the thin volume expansion policy.
+ - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceed the
+ I(thin_volume_max_repo_size) will be automatically expanded.
+ - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded the
+ storage system will wait for manual intervention.
+ - The thin volume_expansion policy can not be modified on existing thin volumes in this module.
+ - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic).
+ choices: ["automatic", "manual"]
+ default: "automatic"
+ type: str
+ version_added: 2.8
+ thin_volume_growth_alert_threshold:
+ description:
+ - This is the thin provision repository utilization threshold (in percent).
+ - When the percentage of used storage of the maximum repository size exceeds this value, an alert will
+ be issued and the I(thin_volume_expansion_policy) will be executed.
+ - Values must be between 10 and 99 (inclusive).
+ default: 95
+ type: int
+ version_added: 2.8
+ owning_controller:
+ description:
+ - Specifies which controller will be the primary owner of the volume
+ - Not specifying will allow the controller to choose ownership.
+ required: false
+ choices: ["A", "B"]
+ type: str
+ version_added: 2.9
+ ssd_cache_enabled:
+ description:
+ - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
+ - The default value is to ignore existing SSD cache setting.
+ type: bool
+ default: false
+ data_assurance_enabled:
+ description:
+ - Determines whether data assurance (DA) should be enabled for the volume
+ - Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
+ type: bool
+ default: false
+ read_cache_enable:
+ description:
+ - Indicates whether read caching should be enabled for the volume.
+ type: bool
+ default: true
+ version_added: 2.8
+ read_ahead_enable:
+ description:
+ - Indicates whether or not automatic cache read-ahead is enabled.
+ - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
+ benefit from read ahead caching.
+ type: bool
+ default: true
+ version_added: 2.8
+ write_cache_enable:
+ description:
+ - Indicates whether write-back caching should be enabled for the volume.
+ type: bool
+ default: true
+ version_added: 2.8
+ cache_without_batteries:
+ description:
+ - Indicates whether caching should be used without battery backup.
+ - Warning! When I(cache_without_batteries==true) and the storage system loses power with no battery backup, data will be lost!
+ type: bool
+ default: false
+ version_added: 2.9
+ workload_name:
+ description:
+ - Label for the workload defined by the metadata.
+ - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
+ array.
+ - When I(workload_name) exists on the storage array but the metadata is different then the workload
+ definition will be updated. (Changes will update all associated volumes!)
+ - Existing workloads can be retrieved using M(netapp_e_facts).
+ required: false
+ type: str
+ version_added: 2.8
+ metadata:
+ description:
+ - Dictionary containing metadata for the use, user, location, etc. of the volume (the dictionary is arbitrarily
+ defined for whatever the user deems useful).
+ - When I(workload_name) exists on the storage array but the metadata is different then the workload
+ definition will be updated. (Changes will update all associated volumes!)
+ - I(workload_name) must be specified when I(metadata) is defined.
+ type: dict
+ required: false
+ version_added: 2.8
+ wait_for_initialization:
+ description:
+ - Forces the module to wait for expansion operations to complete before continuing.
+ type: bool
+ default: false
+ version_added: 2.8
+ initialization_timeout:
+ description:
+ - Duration in seconds before the wait_for_initialization operation will terminate.
+ - I(wait_for_initialization) must be true for this option to have any effect on the module's operations.
+ type: int
+ required: false
+ version_added: 2.9
+"""
+EXAMPLES = """
+- name: Create simple volume with workload tags (volume meta data)
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ workload_name: volume_tag
+ metadata:
+ key1: value1
+ key2: value2
+- name: Create a thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 131072
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+- name: Expand thin volume's virtual size
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+- name: Expand thin volume's maximum repository size
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 2048
+- name: Delete volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: absent
+ name: volume
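+# Illustrative example only; the options shown are documented above and the credential variables are the same placeholders used in the previous examples.
+- name: Update volume cache settings and controller ownership
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ owning_controller: A
+ read_cache_enable: true
+ write_cache_enable: true
+ ssd_cache_enabled: false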
+"""
+RETURN = """
+msg:
+ description: State of volume
+ type: str
+ returned: always
+ sample: "Standard volume [workload_vol_1] has been created."
+"""
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesVolume(NetAppESeriesModule):
+ VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300
+
+ def __init__(self):
+ ansible_options = dict(
+ state=dict(required=True, choices=["present", "absent"]),
+ name=dict(required=True, type="str"),
+ storage_pool_name=dict(type="str"),
+ size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
+ type="str"),
+ size=dict(type="float"),
+ segment_size_kb=dict(type="int", default=128),
+ owning_controller=dict(required=False, choices=['A', 'B']),
+ ssd_cache_enabled=dict(type="bool", default=False),
+ data_assurance_enabled=dict(type="bool", default=False),
+ thin_provision=dict(type="bool", default=False),
+ thin_volume_repo_size=dict(type="int"),
+ thin_volume_max_repo_size=dict(type="float"),
+ thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"], default="automatic"),
+ thin_volume_growth_alert_threshold=dict(type="int", default=95),
+ read_cache_enable=dict(type="bool", default=True),
+ read_ahead_enable=dict(type="bool", default=True),
+ write_cache_enable=dict(type="bool", default=True),
+ cache_without_batteries=dict(type="bool", default=False),
+ workload_name=dict(type="str", required=False),
+ metadata=dict(type="dict", require=False),
+ wait_for_initialization=dict(type="bool", default=False),
+ initialization_timeout=dict(type="int", required=False))
+
+ required_if = [
+ ["state", "present", ["storage_pool_name", "size"]],
+ ["thin_provision", "true", ["thin_volume_repo_size"]]
+ ]
+
+ super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True,
+ required_if=required_if)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.name = args["name"]
+ self.storage_pool_name = args["storage_pool_name"]
+ self.size_unit = args["size_unit"]
+ self.segment_size_kb = args["segment_size_kb"]
+ if args["size"]:
+ self.size_b = self.convert_to_aligned_bytes(args["size"])
+
+ self.owning_controller_id = None
+ if args["owning_controller"]:
+ self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"
+
+ self.read_cache_enable = args["read_cache_enable"]
+ self.read_ahead_enable = args["read_ahead_enable"]
+ self.write_cache_enable = args["write_cache_enable"]
+ self.ssd_cache_enabled = args["ssd_cache_enabled"]
+ self.cache_without_batteries = args["cache_without_batteries"]
+ self.data_assurance_enabled = args["data_assurance_enabled"]
+
+ self.thin_provision = args["thin_provision"]
+ self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
+ self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
+ self.thin_volume_repo_size_b = None
+ self.thin_volume_max_repo_size_b = None
+
+ if args["thin_volume_repo_size"]:
+ self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
+ if args["thin_volume_max_repo_size"]:
+ self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
+
+ self.workload_name = args["workload_name"]
+ self.metadata = args["metadata"]
+ self.wait_for_initialization = args["wait_for_initialization"]
+ self.initialization_timeout = args["initialization_timeout"]
+
+ # convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
+ # each of the workload attributes dictionary entries
+ metadata = []
+ if self.metadata:
+ if not self.workload_name:
+ self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified."
+ " Array [%s]." % self.ssid)
+ for key in self.metadata.keys():
+ metadata.append(dict(key=key, value=self.metadata[key]))
+ self.metadata = metadata
+
+ if self.thin_provision:
+ if not self.thin_volume_max_repo_size_b:
+ self.thin_volume_max_repo_size_b = self.size_b
+
+ if not self.thin_volume_expansion_policy:
+ self.thin_volume_expansion_policy = "automatic"
+
+ if self.size_b > 256 * 1024 ** 4:
+ self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb is size."
+ " Attempted size [%sg]" % (self.size_b * 1024 ** 3))
+
+ if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
+ self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
+ self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
+ " repository size. Array [%s]." % self.ssid)
+
+ if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
+ self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99."
+ "thin_volume_growth_alert_threshold [%s]. Array [%s]."
+ % (self.thin_volume_growth_alert_threshold, self.ssid))
+
+ self.volume_detail = None
+ self.pool_detail = None
+ self.workload_id = None
+
+ def convert_to_aligned_bytes(self, size):
+ """Convert size to the truncated byte size that aligns on the segment size."""
+ size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
+ segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
+ segment_count = int(size_bytes / segment_size_bytes)
+ return segment_count * segment_size_bytes
+
+ def get_volume(self):
+ """Retrieve volume details from storage array."""
+ volumes = list()
+ thin_volumes = list()
+ try:
+ rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+ try:
+ rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
+ return volume_detail[0] if volume_detail else dict()
+
+ def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
+ """Waits until volume becomes available.
+
+ :raises AnsibleFailJson when retries are exhausted.
+ """
+ if retries == 0:
+ self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
+ % (self.name, self.ssid))
+ if not self.get_volume():
+ sleep(5)
+ self.wait_for_volume_availability(retries=retries - 1)
+
+ def wait_for_volume_action(self, timeout=None):
+ """Waits until volume action is complete is complete.
+ :param: int timeout: Wait duration measured in seconds. Waits indefinitely when None.
+ """
+ action = "unknown"
+ percent_complete = None
+ while action != "complete":
+ sleep(5)
+
+ try:
+ rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)
+
+ # Search long lived operations for volume
+ action = "complete"
+ for operation in operations["longLivedOpsProgress"]:
+ if operation["volAction"] is not None:
+ for key in operation.keys():
+ if (operation[key] is not None and "volumeRef" in operation[key] and
+ (operation[key]["volumeRef"] == self.volume_detail["id"] or
+ ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
+ action = operation["volAction"]
+ percent_complete = operation["init"]["percentComplete"]
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(err)))
+
+ if timeout is not None:
+ if timeout <= 0:
+ self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining"
+ " [%s]. Array Id [%s]." % (action, percent_complete, self.ssid))
+ self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid))
+ if timeout:
+ timeout -= 5
+
+ self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete))
+ self.module.log("Expansion action is complete.")
+
+ def get_storage_pool(self):
+ """Retrieve storage pool details from the storage array."""
+ storage_pools = list()
+ try:
+ rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name]
+ return pool_detail[0] if pool_detail else dict()
+
+ def check_storage_pool_sufficiency(self):
+ """Perform a series of checks as to the sufficiency of the storage pool for the volume."""
+ if not self.pool_detail:
+ self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
+
+ if not self.volume_detail:
+ if self.thin_provision and not self.pool_detail['diskPool']:
+ self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.')
+
+ if (self.data_assurance_enabled and not
+ (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and
+ self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")):
+ self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible."
+ " Array [%s]." % self.ssid)
+
+ if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision:
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+ else:
+ # Check for expansion
+ if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and
+ not self.thin_provision):
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+
+ def update_workload_tags(self, check_mode=False):
+ """Check the status of the workload tag and update storage array definitions if necessary.
+
+ When the workload attributes are not provided but an existing workload tag name is, then the attributes will be
+ used.
+
+ :return bool: Whether changes were required to be made."""
+ change_required = False
+ workload_tags = None
+ request_body = None
+ ansible_profile_id = None
+
+ if self.workload_name:
+ try:
+ rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid)
+
+ # Generate common indexed Ansible workload tag
+ current_tag_index_list = [int(pair["value"].replace("ansible_workload_", ""))
+ for tag in workload_tags for pair in tag["workloadAttributes"]
+ if pair["key"] == "profileId" and "ansible_workload_" in pair["value"] and
+ str(pair["value"]).replace("ansible_workload_", "").isdigit()]
+
+ tag_index = 1
+ if current_tag_index_list:
+ tag_index = max(current_tag_index_list) + 1
+
+ ansible_profile_id = "ansible_workload_%d" % tag_index
+ request_body = dict(name=self.workload_name,
+ profileId=ansible_profile_id,
+ workloadInstanceIndex=None,
+ isValid=True)
+
+ # evaluate and update storage array when needed
+ for tag in workload_tags:
+ if tag["name"] == self.workload_name:
+ self.workload_id = tag["id"]
+
+ if not self.metadata:
+ break
+
+ # Determine if core attributes (everything but profileId) are the same
+ metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata)
+ tag_set = set(tuple(sorted(attr.items()))
+ for attr in tag["workloadAttributes"] if attr["key"] != "profileId")
+ if metadata_set != tag_set:
+ self.module.log("Workload tag change is required!")
+ change_required = True
+
+ # only perform the required action when check_mode==False
+ if change_required and not check_mode:
+ self.metadata.append(dict(key="profileId", value=ansible_profile_id))
+ request_body.update(dict(isNewWorkloadInstance=False,
+ isWorkloadDataInitialized=True,
+ isWorkloadCardDataToBeReset=True,
+ workloadAttributes=self.metadata))
+ try:
+ rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]),
+ data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
+ % (self.ssid, to_native(error)))
+ self.module.log("Workload tag [%s] required change." % self.workload_name)
+ break
+
+ # existing workload tag not found so create new workload tag
+ else:
+ change_required = True
+ self.module.log("Workload tag creation is required!")
+
+ if change_required and not check_mode:
+ if self.metadata:
+ self.metadata.append(dict(key="profileId", value=ansible_profile_id))
+ else:
+ self.metadata = [dict(key="profileId", value=ansible_profile_id)]
+
+ request_body.update(dict(isNewWorkloadInstance=True,
+ isWorkloadDataInitialized=False,
+ isWorkloadCardDataToBeReset=False,
+ workloadAttributes=self.metadata))
+ try:
+ rc, resp = self.request("storage-systems/%s/workloads" % self.ssid,
+ method="POST", data=request_body)
+ self.workload_id = resp["id"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
+ % (self.ssid, to_native(error)))
+ self.module.log("Workload tag [%s] was added." % self.workload_name)
+
+ return change_required
+
+ def get_volume_property_changes(self):
+ """Retrieve the volume update request body when change(s) are required.
+
+ :raise AnsibleFailJson when attempting to change segment size on existing volume.
+ :return dict: request body when change(s) to a volume's properties are required.
+ """
+ change = False
+ request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[],
+ cacheSettings=dict(readCacheEnable=self.read_cache_enable,
+ writeCacheEnable=self.write_cache_enable))
+
+ # check for invalid modifications
+ if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]):
+ self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified."
+ % self.volume_detail["segmentSize"])
+
+ # common thick/thin volume properties
+ if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or
+ self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or
+ self.ssd_cache_enabled != self.volume_detail["flashCached"]):
+ change = True
+
+ # controller ownership
+ if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]:
+ change = True
+ request_body.update(dict(owningControllerId=self.owning_controller_id))
+
+ if self.workload_name:
+ request_body.update(dict(metaTags=[dict(key="workloadId", value=self.workload_id),
+ dict(key="volumeTypeId", value="volume")]))
+ if {"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"]:
+ change = True
+ elif self.volume_detail["metadata"]:
+ change = True
+
+ # thick/thin volume specific properties
+ if self.thin_provision:
+ if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]):
+ change = True
+ request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+ if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]:
+ change = True
+ request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy))
+ else:
+ if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0):
+ change = True
+ request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable))
+ if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]:
+ change = True
+ request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries))
+
+ return request_body if change else dict()
+
+ def get_expand_volume_changes(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ :return dict: dictionary containing all the necessary values for volume expansion request
+ """
+ request_body = dict()
+
+ if self.size_b < int(self.volume_detail["capacity"]):
+ self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. Array [%s]"
+ % (self.name, self.ssid))
+
+ if self.volume_detail["thinProvisioned"]:
+ if self.size_b > int(self.volume_detail["capacity"]):
+ request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
+ self.module.log("Thin volume virtual size have been expanded.")
+
+ if self.volume_detail["expansionPolicy"] == "automatic":
+ if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]):
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (automatic policy).")
+
+ elif self.volume_detail["expansionPolicy"] == "manual":
+ if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]):
+ change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
+ if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
+ self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb"
+ " and 256gb in increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b * 1024 ** 3))
+
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (manual policy).")
+
+ elif self.size_b > int(self.volume_detail["capacity"]):
+ request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
+ self.module.log("Volume storage capacities have been expanded.")
+
+ return request_body
+
+ def create_volume(self):
+ """Create thick/thin volume according to the specified criteria."""
+ body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
+ dataAssuranceEnabled=self.data_assurance_enabled)
+
+ if self.thin_provision:
+ body.update(dict(virtualSize=self.size_b,
+ repositorySize=self.thin_volume_repo_size_b,
+ maximumRepositorySize=self.thin_volume_max_repo_size_b,
+ expansionPolicy=self.thin_volume_expansion_policy,
+ growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+ try:
+ rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New thin volume created [%s]." % self.name)
+
+ else:
+ body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
+ try:
+ rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New volume created [%s]." % self.name)
+
+ def update_volume_properties(self):
+ """Update existing thin-volume or volume properties.
+
+ :raise AnsibleFailJson when either thick/thin volume update request fails.
+ :return bool: whether update was applied
+ """
+ self.wait_for_volume_availability()
+ self.volume_detail = self.get_volume()
+
+ request_body = self.get_volume_property_changes()
+
+ if request_body:
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ return True
+ return False
+
+ def expand_volume(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ """
+ request_body = self.get_expand_volume_changes()
+ if request_body:
+ if self.volume_detail["thinProvisioned"]:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+ self.module.log("Thin volume specifications have been expanded.")
+
+ else:
+ try:
+ rc, resp = self.request(
+ "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
+ data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+
+ self.module.log("Volume storage capacities have been expanded.")
+
+ def delete_volume(self):
+ """Delete existing thin/thick volume."""
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Thin volume deleted [%s]." % self.name)
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Volume deleted [%s]." % self.name)
+
+ def apply(self):
+ """Determine and apply any changes necessary to satisfy the specified criteria.
+
+ :raise AnsibleExitJson when completes successfully"""
+ change = False
+ msg = None
+
+ self.volume_detail = self.get_volume()
+ self.pool_detail = self.get_storage_pool()
+
+ # Determine whether changes need to be applied to existing workload tags
+ if self.state == 'present' and self.update_workload_tags(check_mode=True):
+ change = True
+
+ # Determine if any changes need to be applied
+ if self.volume_detail:
+ if self.state == 'absent':
+ change = True
+
+ elif self.state == 'present':
+ if self.get_expand_volume_changes() or self.get_volume_property_changes():
+ change = True
+
+ elif self.state == 'present':
+ if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
+ self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
+ self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
+ self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
+ " increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b / (1024 ** 3)))
+ change = True
+
+ self.module.log("Update required: [%s]." % change)
+
+ # Apply any necessary changes
+ if change and not self.module.check_mode:
+ if self.state == 'present':
+ if self.update_workload_tags():
+ msg = "Workload tag change occurred."
+
+ if not self.volume_detail:
+ self.check_storage_pool_sufficiency()
+ self.create_volume()
+ self.update_volume_properties()
+ msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
+ else:
+ if self.update_volume_properties():
+ msg = "Volume [%s] properties were updated."
+
+ if self.get_expand_volume_changes():
+ self.expand_volume()
+ msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."
+
+ if self.wait_for_initialization:
+ self.module.log("Waiting for volume operation to complete.")
+ self.wait_for_volume_action(timeout=self.initialization_timeout)
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ msg = "Volume [%s] has been deleted."
+
+ else:
+ msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
+
+ self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
+
+
+def main():
+ volume = NetAppESeriesVolume()
+ volume.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py
new file mode 100644
index 000000000..a6748a54c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume_copy
+short_description: NetApp E-Series create volume copy pairs
+description:
+ - Create and delete volume copy pairs for NetApp E-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ ssid:
+ description:
+ - Storage system identifier
+ type: str
+ default: '1'
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2).
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ type: bool
+ description:
+ - Should https certificates be validated?
+ source_volume_id:
+ description:
+ - The id of the volume copy source.
+ - If used, must be paired with destination_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ type: str
+ destination_volume_id:
+ description:
+ - The id of the volume copy destination.
+ - If used, must be paired with source_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ type: str
+ volume_copy_pair_id:
+ description:
+ - The id of a given volume copy pair
+ - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
+ - Can use to delete or check presence of volume pairs
+ - Must specify this or (destination_volume_id and source_volume_id)
+ type: str
+ state:
+ description:
+ - Whether the specified volume copy pair should exist or not.
+ required: True
+ choices: ['present', 'absent']
+ type: str
+ create_copy_pair_if_does_not_exist:
+ description:
+ - Defines if a copy pair will be created if it does not exist.
+ - If set to True destination_volume_id and source_volume_id are required.
+ type: bool
+ default: True
+ start_stop_copy:
+ description:
+ - Starts a re-copy or stops a copy in progress.
+ - "Note: If you stop the initial file copy before it is done, the copy pair will be destroyed."
+ - Requires volume_copy_pair_id
+ type: str
+ choices: ['start', 'stop']
+ search_volume_id:
+ description:
+ - Searches for all valid potential target and source volumes that could be used in a copy_pair
+ - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
+ type: str
+ copy_priority:
+ description:
+ - Copy priority level
+ required: False
+ default: 0
+ type: int
+ onlineCopy:
+ description:
+ - Whether copy should be online
+ required: False
+ default: False
+ type: bool
+ targetWriteProtected:
+ description:
+ - Whether target should be write protected
+ required: False
+ default: True
+ type: bool
+"""
+EXAMPLES = """
+---
+# Illustrative example only; the volume ids are placeholder variables.
+- name: Create a volume copy pair
+ netapp_e_volume_copy:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "myPass"
+ state: present
+ source_volume_id: "{{ source_volume_id }}"
+ destination_volume_id: "{{ destination_volume_id }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
+def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
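+ # Return the id of an existing volume copy job whose source volume matches source_volume_id; None when no match is found.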
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, method='GET', url_username=params['api_username'],
+ url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ volume_copy_pair_id = None
+ for potential_copy_pair in resp:
+ # Matching is performed on the source volume only.
+ if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
+ volume_copy_pair_id = potential_copy_pair['id']
+
+ return volume_copy_pair_id
+
+
+def create_copy_pair(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ rData = {
+ "sourceId": params['source_volume_id'],
+ "targetId": params['destination_volume_id']
+ }
+
+ (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 200:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def delete_copy_pair_by_copy_pair_id(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
+ params['ssid'], params['volume_copy_pair_id'])
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, ignore_errors=True, method='DELETE',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 204:
+ return False, (rc, resp)
+ else:
+ return True, (rc, resp)
+
+
+def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
+ # An existence check must not modify anything, so list the copy jobs and look for the requested
+ # pair id instead of issuing a DELETE request.
+ get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (rc, resp) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+ validate_certs=params['validate_certs'])
+ if rc != 200:
+ return False, (rc, resp)
+ for copy_pair in resp:
+ if copy_pair['id'] == params['volume_copy_pair_id']:
+ return True, (rc, copy_pair)
+ return False, (404, resp)
+
+
+def start_stop_copy(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
+ params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='POST',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ return True, response_data[0]['percentComplete']
+ else:
+ return False, response_data
+
+
+def check_copy_status(params):
+ get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
+ params['ssid'], params['volume_copy_pair_id'])
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ if response_data['percentComplete'] != -1:
+
+ return True, response_data['percentComplete']
+ else:
+ return False, response_data['percentComplete']
+ else:
+ return False, response_data
+
+
+def find_valid_copy_pair_targets_and_sources(params):
+ get_status = 'storage-systems/%s/volumes' % params['ssid']
+ url = params['api_url'] + get_status
+
+ (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+ url_username=params['api_username'], url_password=params['api_password'],
+ headers=HEADERS,
+ validate_certs=params['validate_certs'])
+
+ if response_code == 200:
+ source_capacity = None
+ candidates = []
+ for volume in response_data:
+ if volume['id'] == params['search_volume_id']:
+ source_capacity = volume['capacity']
+ else:
+ candidates.append(volume)
+
+ potential_sources = []
+ potential_targets = []
+
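+ # Candidates larger than the search volume are potential copy targets; the rest are potential sources.
+ # Volumes already participating in a copy pair (as source or target) are excluded from both lists.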
+ for volume in candidates:
+ if volume['capacity'] > source_capacity:
+ if volume['volumeCopyTarget'] is False:
+ if volume['volumeCopySource'] is False:
+ potential_targets.append(volume['id'])
+ else:
+ if volume['volumeCopyTarget'] is False:
+ if volume['volumeCopySource'] is False:
+ potential_sources.append(volume['id'])
+
+ return potential_targets, potential_sources
+
+ else:
+ raise Exception("Response [%s]" % response_code)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ source_volume_id=dict(type='str'),
+ destination_volume_id=dict(type='str'),
+ copy_priority=dict(required=False, default=0, type='int'),
+ ssid=dict(type='str', default='1'),
+ api_url=dict(required=True),
+ api_username=dict(required=False),
+ api_password=dict(required=False, no_log=True),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ targetWriteProtected=dict(required=False, default=True, type='bool'),
+ onlineCopy=dict(required=False, default=False, type='bool'),
+ volume_copy_pair_id=dict(type='str'),
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
+ start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
+ search_volume_id=dict(type='str'),
+ ),
+ mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
+ ['volume_copy_pair_id', 'source_volume_id'],
+ ['volume_copy_pair_id', 'search_volume_id'],
+ ['search_volume_id', 'destination_volume_id'],
+ ['search_volume_id', 'source_volume_id'],
+ ],
+ required_together=[['source_volume_id', 'destination_volume_id'],
+ ],
+ required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
+ ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
+ ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
+ ]
+
+ )
+ params = module.params
+
+ if not params['api_url'].endswith('/'):
+ params['api_url'] += '/'
+
+ # Check if we want to search
+ if params['search_volume_id'] is not None:
+ try:
+ potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
+ except Exception as e:
+ module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e))
+
+ module.exit_json(changed=False,
+ msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
+ search_volume_id=params['search_volume_id'],
+ valid_targets=potential_targets,
+ valid_sources=potential_sources)
+
+ # Check if we want to start or stop a copy operation
+ if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
+
+ # Get the current status info
+ currently_running, status_info = check_copy_status(params)
+
+ # If we want to start
+ if params['start_stop_copy'] == 'start':
+
+ # If we have already started
+ if currently_running is True:
+ module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
+ volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
+ # If we need to start
+ else:
+
+ start_status, info = start_stop_copy(params)
+
+ if start_status is True:
+ module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
+ volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
+ else:
+ module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
+
+ # If we want to stop
+ else:
+ # If it has already stopped
+ if currently_running is False:
+ module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
+ volume_copy_pair_id=params['volume_copy_pair_id'])
+
+ # If we need to stop it
+ else:
+ start_status, info = start_stop_copy(params)
+
+ if start_status is True:
+ module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
+ volume_copy_pair_id=params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
+
+ # If we want the copy pair to exist we do this stuff
+ if params['state'] == 'present':
+
+ # We need to check if it exists first
+ if params['volume_copy_pair_id'] is None:
+ params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+ params)
+
+ # If no volume copy pair is found we need to make it.
+ if params['volume_copy_pair_id'] is None:
+
+ # We cannot create a copy pair with just a volume_copy_pair_id; source_volume_id and destination_volume_id are required.
+
+ copy_began_status, (rc, resp) = create_copy_pair(params)
+
+ if copy_began_status is True:
+ module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
+ else:
+ module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
+
+ # If it does exist we do nothing
+ else:
+ # We verify that it exists
+ exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
+ params)
+
+ if exist_status:
+ module.exit_json(changed=False,
+ msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
+ else:
+ if exist_status_code == 404:
+ module.fail_json(
+ msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
+ params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
+ exist_status_code, exist_status_data))
+
+ module.fail_json(msg="Done")
+
+ # If we want it to not exist we do this
+ else:
+
+ if params['volume_copy_pair_id'] is None:
+ params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+ params)
+
+ # We delete it by the volume_copy_pair_id
+ delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
+
+ if delete_status is True:
+ module.exit_json(changed=True,
+ msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
+ else:
+ if delete_status_code == 404:
+ module.exit_json(changed=False,
+ msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
+ else:
+ module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
+ delete_status_code, delete_status_data))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/.travis.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/.travis.yml
new file mode 100644
index 000000000..36bbf6208
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/.travis.yml
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+ apt:
+ packages:
+ - python-pip
+
+install:
+ # Install ansible
+ - pip install ansible
+
+ # Check ansible version
+ - ansible --version
+
+ # Create ansible.cfg with correct roles_path
+ - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+ # Basic role syntax check
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/README.md b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/README.md
new file mode 100644
index 000000000..b5ae41037
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/README.md
@@ -0,0 +1,149 @@
+nar_santricity_common
+=====================
+ Discovers NetApp E-Series storage systems and configures the SANtricity Web Services Proxy.
+
+ The following variables will be added to the runtime host inventory.
+ current_eseries_api_url: # Web Services REST API URL
+ current_eseries_api_username: # Web Services REST API username
+ current_eseries_api_password: # Web Services REST API password
+ current_eseries_ssid: # Arbitrary string for the proxy to represent the storage system.
+ current_eseries_validate_certs: # Indicates whether SSL certificates should be verified.
+ current_eseries_api_is_proxy: # Indicates whether Web Services REST API is running on a proxy.
+
+
+Requirements
+------------
+ - NetApp E-Series E2800 platform or newer, or NetApp SANtricity Web Services Proxy configured for older E-Series storage systems.
+
+Tested Ansible Versions
+-----------------------
+ - Ansible 5.x (ansible-core 2.12)
+
+Example Playbook
+----------------
+ - hosts: eseries_storage_systems
+ gather_facts: false
+ collections:
+ - netapp_eseries.santricity
+ tasks:
+ - name: Configure SANtricity Web Services and discover storage systems
+ import_role:
+ name: nar_santricity_common
+
+
+Example Inventory Host file using discovery with proxy
+------------------------------------------------------
+ eseries_proxy_api_url: https://192.168.1.100:8443/devmgr/v2/
+ eseries_proxy_api_password: admin_password
+ eseries_subnet: 192.168.1.0/24 # This should only be defined at the group level once when utilizing Web Services Proxy and should be broad enough to include all systems being added to proxy instance.
+ eseries_system_serial: 012345678901
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+ (...)
+
+
+Example Inventory Host file using discovery without proxy
+---------------------------------------------------------
+**Note:** While eseries_management_interfaces and eseries_system_api_url are optional, including at least one of them will prevent the discovery mechanism from being used when the system can be reached using that information.
+ eseries_subnet: 192.168.1.0/24
+ eseries_system_serial: 012345678901
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+ (...)
+
+
+Example Inventory Host file without using discovery (Embedded Web Services)
+---------------------------------------------------------------------------
+ eseries_system_api_url: https://192.168.1.200:8443/devmgr/v2/
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+ (...)
+
+
+Example Inventory Host file without using discovery (Proxy Web Services - system must have already been added to the proxy)
+------------------------------------------------------------------------
+ eseries_proxy_ssid: storage_ssid
+ eseries_proxy_api_url: https://192.168.2.200:8443/devmgr/v2/
+ eseries_proxy_api_password: admin_password
+ (...)
+
+
+Notes
+-----
+Use the SANtricity Web Services Proxy to avoid having to discover the storage systems each time nar_santricity_common is executed. The first time nar_santricity_common is executed, it will add the storage systems to the proxy so that they can be recalled without searching the subnet on each subsequent execution.
+The na_santricity_proxy_systems module is used to add storage systems to the proxy, but it requires a complete list of the desired systems since it ensures that only the systems provided remain on the proxy. As a result, any system that is not included will be removed from the proxy.
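+
+ As an illustrative sketch (the file layout, addresses, and serial numbers are hypothetical), the proxy information can be defined once at the group level while each storage system host supplies only its own serial number and password:
+
+    # group_vars/eseries_storage_systems.yml
+    eseries_proxy_api_url: https://192.168.1.100:8443/devmgr/v2/
+    eseries_proxy_api_password: admin_password
+    eseries_subnet: 192.168.1.0/24
+
+    # host_vars/array_1.yml
+    eseries_system_serial: "012345678901"
+    eseries_system_password: admin_password
+
+    # host_vars/array_2.yml
+    eseries_system_serial: "012345678902"
+    eseries_system_password: admin_password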
+
+Role Variables
+--------------
+ eseries_subnet: # Network subnet to search for the storage system specified in CIDR form. Example: 192.168.1.0/24
+ # Note: eseries_subnet should only be defined once at the group level when utilizing the Web Services Proxy.
+ eseries_template_api_url: # Template for the web services api url. Default: https://0.0.0.0:8443/devmgr/v2/
+ eseries_prefer_embedded: false # Overrides the default behavior of using the Web Services Proxy when eseries_proxy_api_url is defined. This will only affect storage systems that have Embedded Web Services.
+ eseries_validate_certs: true # Indicates whether SSL certificates should be verified. Used for both embedded and proxy. Choices: true, false
+
+ # Storage system specific variables
+ eseries_proxy_ssid: # Arbitrary string for the proxy to represent the storage system. eseries_system_serial will be used when not defined.
+ eseries_system_serial: # Storage system serial number. (This is located on a label at the top-left towards the front on the device)
+ eseries_system_addresses: # Storage system management IP addresses. Only required when eseries_system_serial or eseries_system_api_url are not defined. When not specified, addresses will be populated with eseries_management_interfaces controller addresses.
+ eseries_system_api_url: # URL for the storage system's embedded Web Services REST API. Example: https://192.168.10.100/devmgr/v2
+ eseries_system_username: admin # Username for the storage system's embedded Web Services REST API
+ eseries_system_password: # Password for the storage system's embedded Web Services REST API. When the admin password has not been set, eseries_system_password will be used to set it.
+ eseries_system_tags: # Meta tags to associate with storage system when added to the proxy.
+
+ # Storage system management interface information
+ Note: eseries_management_interfaces will be used when eseries_system_serial, eseries_system_api_url, or eseries_system_addresses are not defined.
+ eseries_management_interfaces: # Subset of the eseries_management_interface variable found in the nar_santricity_management role
+ controller_a:
+ - address: # Controller A port 1's IP address
+ - address: # Controller A port 2's IP address
+ controller_b:
+ - address: # Controller B port 1's IP address
+ - address: # Controller B port 2's IP address
+
+ # Web Services Proxy specific variable
+ Note: eseries_proxy_* variables are required to discover storage systems prior to SANtricity OS version 11.60.2.
+ eseries_proxy_api_url: # URL for the SANtricity Web Services Proxy REST API. Example: https://192.168.10.100/devmgr/v2
+ eseries_proxy_api_username: # Username for the SANtricity Web Services Proxy REST API (Default: admin).
+ eseries_proxy_api_password: # Password for the SANtricity Web Services Proxy REST API. When the admin password has
+ # not been set, eseries_proxy_api_password will be used to set it.
+ eseries_proxy_api_old_password: # Previous proxy admin password. This is used to change the current admin password by setting this
+ # variable to the current proxy password and eseries_proxy_api_password to the new password.
+ eseries_proxy_monitor_password: # Proxy password for the monitor username
+ eseries_proxy_security_password: # Proxy password for the security username
+ eseries_proxy_storage_password: # Proxy password for the storage username
+ eseries_proxy_support_password: # Proxy password for the support username
+ eseries_proxy_accept_certifications: # Force automatic acceptance of all storage systems' certificates
+ eseries_proxy_default_system_tags: # Default meta tags to associate with all storage systems
+ eseries_proxy_default_password: # Default password to associate with all storage systems. This is overridden by eseries_system_password.
+ eseries_proxy_client_certificate_common_certificates: # List of common proxy client certificate file paths. These files will be appended to each client certificate list.
+ eseries_proxy_client_certificate_certificates: # List of proxy client certificate file paths
+ eseries_proxy_server_certificate_common_certificates: # List of common proxy server certificates. These files will be appended to each controller's server certificate list.
+ eseries_proxy_server_certificate_common_passphrase: # Common passphrase for decrypting PEM (PKCS8) private key.
+ eseries_proxy_server_certificate_certificates: # List of proxy server certificates. Leave blank to use self-signed certificate.
+ eseries_proxy_server_certificate_passphrase: # Passphrase for decrypting PEM (PKCS8) private key.
+
+ # LDAP configuration defaults
+ eseries_proxy_ldap_state: # Whether LDAP should be configured for the proxy
+ eseries_proxy_ldap_identifier: # The user attributes that should be considered for the group to role mapping
+ eseries_proxy_ldap_user_attribute: # Attribute used to match the provided username during authentication.
+ eseries_proxy_ldap_bind_username: # User account that will be used for querying the LDAP server.
+ eseries_proxy_ldap_bind_password: # Password for the bind user account
+ eseries_proxy_ldap_server: # LDAP server URL.
+ eseries_proxy_ldap_search_base: # Search base used to find the user's group membership
+ eseries_proxy_ldap_role_mappings: # Dictionary of user groups, each containing the list of access roles (an illustrative mapping is shown below).
+ # Role choices: storage.admin - allows users full read/write access to storage objects and operations.
+ # storage.monitor - allows users read-only access to storage objects and operations.
+ # support.admin - allows users access to hardware, diagnostic information, major event logs,
+ # and other critical support-related functionality, but not the storage configuration.
+ # security.admin - allows users access to authentication/authorization configuration, as
+ # well as the audit log configuration, and certificate management.
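+    # Illustrative mapping only (the group pattern below is hypothetical; see the na_santricity_ldap module documentation for exact matching behavior):
+    #   eseries_proxy_ldap_role_mappings:
+    #     ".*eseries-admins.*":
+    #       - storage.admin
+    #       - storage.monitor
+    #       - support.admin
+    #       - security.admin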
+
+
+License
+-------
+ BSD-3-Clause
+
+
+Author Information
+------------------
+ Nathan Swartz (@ndswartz)
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/defaults/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/defaults/main.yml
new file mode 100644
index 000000000..4c0233b24
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/defaults/main.yml
@@ -0,0 +1,45 @@
+---
+#eseries_subnet: # Network subnet to search for the storage system specified in CIDR form. Example: 192.168.1.0/24
+eseries_subnet_default_prefix: 22 # Default subnet prefix to use when no other prefix is supplied. Default: 22
+eseries_template_api_url: https://0.0.0.0:8443/devmgr/v2/ # Template for the web services api url. Default: https://0.0.0.0:8443/devmgr/v2/
+#eseries_validate_certs: # Whether SSL certificates should be verified. Used for both embedded and proxy. Choices: true, false
+eseries_prefer_embedded: false # Overrides the default behavior of using Web Services Proxy when eseries_proxy_api_url is defined. This will only
+ # affect storage systems that have Embedded Web Services.
+
+# Storage system specific variables
+# ---------------------------------
+#eseries_proxy_ssid: # Arbitrary string for the proxy to represent the storage system. eseries_system_serial will be used when not defined.
+#eseries_system_serial: # Storage system serial number (This is located on a label at the top-left towards the front on the device)
+#eseries_system_addresses: # Storage system management IP addresses. Only required when eseries_system_serial or eseries_system_api_url are not
+ # defined. When not specified, addresses will be populated with eseries_management_interfaces controller addresses
+#eseries_system_api_url: # URL for the storage system's embedded Web Services REST API. Example: https://192.168.10.100/devmgr/v2
+eseries_system_username: admin # Username for the storage system's embedded Web Services REST API
+#eseries_system_password: # Password for the storage system's embedded Web Services REST API. When the admin password has not been set,
+ # eseries_system_password will be used to set it.
+#eseries_system_tags: # Meta tags to associate with storage system when added to the proxy.
+
+# Storage system management interface information
+# -----------------------------------------------
+# Note: eseries_management_interfaces will be used when eseries_system_serial, eseries_system_api_url, or eseries_system_addresses are not defined.
+#eseries_management_interfaces: # Subset of the eseries_management_interface variable found in the nar_santricity_management role
+# controller_a:
+# - address: # Controller A port 1's IP address
+# - address: # Controller A port 2's IP address
+# controller_b:
+# - address: # Controller B port 1's IP address
+# - address: # Controller B port 2's IP address
+
+# Web Services Proxy specific variable
+# ------------------------------------
+# Note: eseries_proxy_* variables are required to discover storage systems prior to SANtricity OS version 11.60.2.
+#eseries_proxy_api_url: # URL for the SANtricity Web Services Proxy REST API. Example: https://192.168.10.100/devmgr/v2
+eseries_proxy_api_username: admin # Username for the SANtricity Web Services Proxy REST API.
+#eseries_proxy_api_password: # Password for the SANtricity Web Services Proxy REST API. When the admin password has not been set,
+ # eseries_proxy_api_password will be used to set it.
+#eseries_proxy_monitor_password: # Proxy password for the monitor username
+#eseries_proxy_security_password: # Proxy password for the security username
+#eseries_proxy_storage_password: # Proxy password for the storage username
+#eseries_proxy_support_password: # Proxy password for the support username
+#eseries_proxy_accept_certifications: # Force automatic acceptance of all storage systems' certificates
+#eseries_proxy_default_system_tags: # Default meta tags to associate with all storage systems
+#eseries_proxy_default_password: # Default password to associate with all storage systems. This is overridden by eseries_system_password.
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/meta/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/meta/main.yml
new file mode 100644
index 000000000..62da5894a
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/meta/main.yml
@@ -0,0 +1,13 @@
+galaxy_info:
+ author: Nathan Swartz (@ndswartz)
+ description: Discovers NetApp E-Series storage systems and configures the SANtricity Web Services Proxy.
+ company: NetApp, Inc
+ license: BSD-3-Clause
+ platforms: []
+ min_ansible_version: 2.13
+ galaxy_tags:
+ - netapp
+ - eseries
+ - storage
+
+dependencies: [] \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/build_info.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/build_info.yml
new file mode 100644
index 000000000..a78af62ff
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/build_info.yml
@@ -0,0 +1,38 @@
+- name: Collect storage system facts
+ uri:
+ url: |-
+ {%- if eseries_prefer_embedded == True -%}
+ {{- eseries_system_api_url | default(eseries_proxy_api_url) | regex_replace('v2/?$', 'utils/about') -}}
+ {%- else -%}
+ {{- eseries_proxy_api_url | default(eseries_system_api_url) | regex_replace('v2/?$', 'utils/about') -}}
+ {%- endif -%}
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ validate_certs: false
+ connection: local
+ register: about
+ failed_when: false
+ when: eseries_proxy_api_url is defined or eseries_system_api_url is defined
+ tags: always
+
+- name: Determine whether the SANtricity Web Services REST API is running as a proxy
+ set_fact:
+ current_eseries_api_is_proxy: "{{ about['json']['runningAsProxy'] | default(False) }}"
+ tags: always
+
+- name: Collect Web Services information from either proxy or embedded with a preference for embedded.
+ include_tasks: collect_facts/prefer_embedded.yml
+ when: (current_eseries_api_is_proxy == True and eseries_prefer_embedded == True) or current_eseries_api_is_proxy == False
+ tags: always
+
+- name: Collect Web Services information from proxy.
+ include_tasks: collect_facts/prefer_proxy.yml
+ when: current_eseries_api_is_proxy == True and current_eseries_api_url is not defined
+ tags: always
+
+- name: Check whether current_eseries_api_url is defined
+ fail:
+ msg: "Could not determine or discover storage system contact information!"
+ when: current_eseries_api_url is not defined or current_eseries_ssid is not defined
+ tags: always
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/discovery.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/discovery.yml
new file mode 100644
index 000000000..3a5e0da49
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/discovery.yml
@@ -0,0 +1,64 @@
+- name: Check whether eseries_system_serial is defined.
+ ansible.builtin.fail:
+ msg: "Unable to search for storage system! The variable eseries_system_serial must be defined (See SANtricity README, section Storage System Credentials)."
+ when: eseries_system_serial is not defined
+
+- name: Determine associated management interface subnet information.
+ ansible.builtin.set_fact:
+ eseries_subnets: |-
+ {%- set subnets = [] %}
+ {%- if eseries_subnet is defined -%}
+ {%- if subnets.append(eseries_subnet) -%}{%- endif -%}
+ {%- endif -%}
+ {%- set interfaces = eseries_management_interfaces["controller_a"] | default([]) + eseries_management_interfaces["controller_b"] | default([]) -%}
+ {%- for interface in interfaces if interface["address"] is defined -%}
+ {%- if subnets.append(interface["address"] ~ "/" ~ interface["subnet_mask"] | default(eseries_management_interfaces["subnet_mask"] | default(eseries_management_subnet_mask | default(eseries_subnet_default_prefix)))) -%}{%- endif -%}
+ {%- endfor -%}
+ {{- subnets | ansible.utils.ipaddr('network/prefix') | unique -}}
+
+- name: Ensure there is at least one subnet to search for storage systems.
+ ansible.builtin.fail:
+ msg: "There is not enough management information to search for the storage system(s)! (See SANtricity README, section Storage System Credentials)"
+ when: eseries_subnets | length == 0
+
+- name: Discover storage systems from all subnet ranges
+ netapp_eseries.santricity.na_santricity_discover:
+ proxy_url: "{{ item['value']['proxy_url'] }}"
+ proxy_username: "{{ item['value']['proxy_username'] }}"
+ proxy_password: "{{ item['value']['proxy_password'] }}"
+ proxy_validate_certs: "{{ item['value']['proxy_validate_certs'] }}"
+ subnet_mask: "{{ item['key'] }}"
+ prefer_embedded: "{{ item['value']['prefer_embedded'] }}"
+ run_once: true
+ connection: local
+ register: discovered_systems
+ loop: "{{ subnets | dict2items }}"
+ tags: always
+ vars:
+ subnets: |-
+ {#- Build a dictionary of subnet searches and any proxies, should they be available #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- for eseries_subnet in hostvars[array]["eseries_subnets"] | default([]) -%}
+
+ {%- if "eseries_proxy_api_url" in (hostvars[array].keys() | list) -%}
+ {%- if systems.update({eseries_subnet: {
+ "proxy_url": hostvars[array]["eseries_proxy_api_url"] | default(omit),
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default("admin"),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"] | default(omit),
+ "prefer_embedded": hostvars[array]["eseries_prefer_embedded"] | default(omit),
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit)}}) %}
+ {%- endif %}
+ {%- else -%}
+ {%- if systems.update({eseries_subnet: {
+ "proxy_url": hostvars[array]["eseries_proxy_api_url"] | default(omit),
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default(omit),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"] | default(omit),
+ "prefer_embedded": hostvars[array]["eseries_prefer_embedded"] | default(omit),
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit)}}) %}
+ {%- endif %}
+ {%- endif -%}
+
+ {%- endfor %}
+ {%- endfor %}
+ {{ systems }}
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_embedded.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_embedded.yml
new file mode 100644
index 000000000..e9fb16bc7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_embedded.yml
@@ -0,0 +1,150 @@
+# Test whether eseries_system_api_url is a valid storage system web services api.
+- name: Validate the storage system embedded Web Services REST API supplied in eseries_system_api_url.
+ block:
+ - name: Determine management url based on eseries_system_api_url.
+ set_fact:
+ eseries_api_url_list: "{{ [eseries_system_api_url] }}"
+
+ - name: Check for valid storage system Web Services API url.
+ include_tasks: collect_facts/validate_system_api_url.yml
+ when: (current_eseries_api_url is not defined or current_eseries_ssid is not defined) and eseries_system_api_url is defined
+ tags: always
+
+
+# Test when a valid storage system web services api can be extrapolated from eseries_management_interfaces
+- name: Validate the storage system embedded Web Services REST API derived from eseries_management_interfaces.
+ block:
+ - name: Determine management interface IP addresses
+ set_fact:
+ eseries_api_url_list: |-
+ {%- set addresses = [] -%}
+ {%- set url_info = eseries_template_api_url | urlsplit %}
+ {%- for address in [eseries_management_interfaces["controller_a"][0]["address"] | default(""),
+ eseries_management_interfaces["controller_a"][1]["address"] | default(""),
+ eseries_management_interfaces["controller_b"][0]["address"] | default(""),
+ eseries_management_interfaces["controller_b"][1]["address"] | default("")] -%}
+ {%- if address != "" and addresses.append([url_info["scheme"], "://", address, ":", url_info["port"], url_info["path"]] | join("")) %}{%- endif -%}
+ {%- endfor %}
+ {{ addresses }}
+
+ - name: Check for valid storage system Web Services API url.
+ include_tasks: collect_facts/validate_system_api_url.yml
+ when: (current_eseries_api_url is not defined or current_eseries_ssid is not defined) and eseries_management_interfaces is defined
+ tags: always
+
+
+# If proxy is available get information from there and avoid the discovery process.
+- name: Attempt to retrieve the storage system from Web Services Proxy.
+ block:
+ - name: Determine existing storage systems in Web Services Proxy.
+ uri:
+ url: "{{ eseries_proxy_api_url | regex_replace('v2/?$', 'v2/storage-systems') }}"
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ url_username: "{{ eseries_proxy_api_username | default('admin') }}"
+ url_password: "{{ eseries_proxy_api_password }}"
+ validate_certs: false
+ connection: local
+ register: proxy_systems
+ - name: Determine associated management interface IP addresses.
+ set_fact:
+ eseries_system_addresses: |-
+ {%- set addresses = [] %}
+ {%- set url_info = eseries_template_api_url | urlsplit %}
+ {%- for address in [eseries_management_interfaces["controller_a"][0]["address"] | default(""),
+ eseries_management_interfaces["controller_a"][1]["address"] | default(""),
+ eseries_management_interfaces["controller_b"][0]["address"] | default(""),
+ eseries_management_interfaces["controller_b"][1]["address"] | default("")] %}
+ {%- if address != "" and addresses.append(address) -%}{%- endif %}
+ {%- endfor %}
+ {{ addresses }}
+ when: eseries_management_interfaces is defined
+
+ - name: Determine storage system SSID based on storage system serial number or associated IP addresses.
+ set_fact:
+ eseries_api_url_list: |-
+ {#- Determine any system that either has the expected serial number or a management ip address -#}
+ {%- set ssids = [] -%}
+ {%- set addresses = [] -%}
+
+ {#- Search discovered storage systems -#}
+ {%- set url_info = eseries_template_api_url | urlsplit %}
+ {%- for system in proxy_systems["json"] -%}
+
+ {#- Check for serial number match -#}
+ {%- if eseries_system_serial is defined and system["chassisSerialNumber"] == eseries_system_serial -%}
+ {%- if ssids.append(system["id"]) -%}{%- endif -%}
+ {%- for address in system["managementPaths"] -%}
+ {%- if addresses.append([url_info["scheme"], "://", address, ":", url_info["port"], url_info["path"]] | join("")) %}{%- endif -%}
+ {%- endfor -%}
+
+ {%- elif eseries_proxy_ssid is defined and eseries_proxy_ssid == system["id"] -%}
+ {%- if ssids.append(system["id"]) -%}{%- endif -%}
+ {%- for address in system["managementPaths"] -%}
+ {%- if addresses.append([url_info["scheme"], "://", address, ":", url_info["port"], url_info["path"]] | join("")) %}{%- endif -%}
+ {%- endfor -%}
+
+ {%- elif eseries_system_addresses is defined and eseries_system_addresses | length > 0 -%}
+ {%- for address in eseries_system_addresses -%}
+ {%- if address in system["managementPaths"] -%}
+ {%- if ssids.append(system["id"]) -%}{%- endif -%}
+ {%- for address in system["managementPaths"] -%}
+ {%- if addresses.append([url_info["scheme"], "://", address, ":", url_info["port"], url_info["path"]] | join("")) %}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endfor -%}
+
+ {%- if ssids | unique | length == 1 -%}
+ {{- addresses -}}
+ {%- else -%}[]{%- endif -%}
+
+ - name: Check for valid storage system Web Services API url.
+ include_tasks: collect_facts/validate_system_api_url.yml
+ when: current_eseries_api_is_proxy == True and (current_eseries_api_url is not defined or current_eseries_ssid is not defined)
+ tags: always
+
+# Try discovering the storage system when the known eseries_system_api_url is not valid
+- name: Attempt to discover storage system.
+ block:
+ - name: Search subnet for storage system.
+ include_tasks: collect_facts/discovery.yml
+ when: discovered_systems is not defined
+
+ - name: Determine storage system Web Services information
+ set_fact:
+ current_eseries_api_info: |-
+ {%- set info = {} -%}
+ {%- if eseries_system_serial is defined -%}
+ {%- set serial = eseries_system_serial | string -%}
+ {%- for result in discovered_systems["results"] if result["systems_found"][serial] is defined -%}
+ {%- if info.update(result["systems_found"][serial]) %}{%- endif -%}
+ {%- endfor %}
+ {%- endif -%}
+ {{ info }}
+
+ - name: Check whether storage system was discovered.
+ fail:
+ msg: "Storage system failed to be discovered! Serial [{{ eseries_system_serial }}]."
+ when: current_eseries_api_info == {}
+
+ - name: Set storage systems Web Services URL information
+ set_fact:
+ current_eseries_api_is_proxy: "{{ current_eseries_api_info['proxy_required'] }}"
+ current_eseries_api_url: "{{ current_eseries_api_info['api_urls'][0] }}"
+ current_eseries_ssid: |-
+ {%- if current_eseries_api_info["proxy_required"] == False -%}
+ 1
+ {%- elif current_eseries_api_info["proxy_required"] == True and current_eseries_api_info['proxy_ssid'] != "" -%}
+ {{- current_eseries_api_info['proxy_ssid'] -}}
+ {%- else -%}
+ {{- eseries_system_serial -}}
+ {%- endif -%}
+ current_eseries_api_username: "{% if current_eseries_api_info['proxy_required'] %}{{ eseries_proxy_api_username | default('admin') }}{% else %}{{ eseries_system_username | default('admin') }}{% endif %}"
+ current_eseries_api_password: "{% if current_eseries_api_info['proxy_required'] %}{{ eseries_proxy_api_password }}{% else %}{{ eseries_system_password }}{% endif %}"
+ current_eseries_validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ no_log: true
+ when: current_eseries_api_url is not defined or current_eseries_ssid is not defined
+ tags: always
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_proxy.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_proxy.yml
new file mode 100644
index 000000000..593a5c57c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/prefer_proxy.yml
@@ -0,0 +1,107 @@
+- name: Determine existing storage systems in Web Services Proxy.
+ uri:
+ url: "{{ eseries_proxy_api_url | regex_replace('v2/?$', 'v2/storage-systems') }}"
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ url_username: "{{ eseries_proxy_api_username | default('admin') }}"
+ url_password: "{{ eseries_proxy_api_password }}"
+ validate_certs: false
+ connection: local
+ register: proxy_systems
+ tags: always
+
+- name: Determine associated management interface IP addresses.
+ set_fact:
+ eseries_system_addresses: |-
+ {%- set addresses = [] %}
+ {%- set url_info = eseries_template_api_url | urlsplit %}
+ {%- for address in [eseries_management_interfaces["controller_a"][0]["address"] | default(""),
+ eseries_management_interfaces["controller_a"][1]["address"] | default(""),
+ eseries_management_interfaces["controller_b"][0]["address"] | default(""),
+ eseries_management_interfaces["controller_b"][1]["address"] | default("")] %}
+ {%- if address != "" and addresses.append(address) -%}{%- endif %}
+ {%- endfor %}
+ {{ addresses }}
+ when: eseries_management_interfaces is defined
+ tags: always
+
+- name: Determine storage system SSID based on storage system serial number or associated IP addresses.
+ set_fact:
+ eseries_ssid_list: |-
+ {#- Determine any system that either has the expected serial number or a management ip address -#}
+ {%- set ssids = [] -%}
+
+ {#- Search discovered storage systems -#}
+ {%- for system in proxy_systems["json"] -%}
+
+ {#- Check for serial number match -#}
+ {%- if eseries_system_serial is defined and system["chassisSerialNumber"] == eseries_system_serial -%}
+ {%- if ssids.append(system["id"]) -%}{%- endif -%}
+
+ {%- elif eseries_proxy_ssid is defined and eseries_proxy_ssid == system["id"] -%}
+ {%- if ssids.append(system["id"]) -%}{%- endif -%}
+
+ {%- elif eseries_system_addresses is defined and eseries_system_addresses | length > 0 -%}
+ {%- for address in eseries_system_addresses -%}
+ {%- if address in system["managementPaths"] -%}
+ {%- if ssids.append(system["id"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- ssids | unique -}}
+ tags: always
+
+- name: Use the Web Services Proxy REST API
+ set_fact:
+ current_eseries_api_url: "{{ eseries_proxy_api_url }}"
+ current_eseries_ssid: "{{ eseries_ssid_list[0] }}"
+ current_eseries_api_username: "{{ eseries_proxy_api_username | default('admin') }}"
+ current_eseries_api_password: "{{ eseries_proxy_api_password }}"
+ current_eseries_validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ no_log: true
+ when: eseries_ssid_list | length == 1 and (eseries_proxy_ssid is not defined or eseries_proxy_ssid == eseries_ssid_list[0])
+ tags: always
+
+- name: Search subnet for storage system.
+ block:
+ - name: Search subnet for storage system.
+ include_tasks: collect_facts/discovery.yml
+ when: discovered_systems is not defined
+
+ - name: Determine storage system Web Services information
+ set_fact:
+ current_eseries_api_info: |-
+ {%- set info = {} -%}
+ {%- if eseries_system_serial is defined -%}
+ {%- set serial = eseries_system_serial | string -%}
+ {%- for result in discovered_systems["results"] if result["systems_found"][serial] is defined -%}
+ {%- if info.update(result["systems_found"][serial]) %}{%- endif -%}
+ {%- endfor %}
+ {%- endif -%}
+ {{ info }}
+
+ - name: Check whether storage system was discovered.
+ fail:
+ msg: "Storage system failed to be discovered! Serial [{{ eseries_system_serial }}]."
+ when: current_eseries_api_info == {}
+
+ - name: Set storage systems Web Services URL information
+ set_fact:
+ current_eseries_api_is_proxy: "{{ current_eseries_api_info['proxy_required'] }}"
+ current_eseries_api_url: "{{ current_eseries_api_info['api_urls'][0] }}"
+ current_eseries_ssid: |-
+ {%- if eseries_proxy_ssid is defined -%}
+ {{- eseries_proxy_ssid -}}
+ {%- elif current_eseries_api_info["proxy_ssid"] != "" -%}
+ {{- current_eseries_api_info["proxy_ssid"] -}}
+ {%- else -%}
+ {{- eseries_system_serial -}}
+ {%- endif -%}
+ current_eseries_api_username: "{{ eseries_proxy_api_username | default('admin') }}"
+ current_eseries_api_password: "{{ eseries_proxy_api_password }}"
+ current_eseries_validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ no_log: true
+ when: current_eseries_api_url is not defined or current_eseries_ssid is not defined
+ tags: always
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/validate_system_api_url.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/validate_system_api_url.yml
new file mode 100644
index 000000000..709239d8e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/collect_facts/validate_system_api_url.yml
@@ -0,0 +1,34 @@
+- name: Validate storage system urls.
+ block:
+ - name: Collect storage system facts.
+ uri:
+ url: "{{ item | regex_replace('v2/?$', 'utils/about') }}"
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ validate_certs: false
+ ignore_errors: true
+ connection: local
+ register: about_results
+ loop: "{{ lookup('list', eseries_api_url_list) }}"
+
+ - name: Determine the first successful Web Services REST API url.
+ set_fact:
+ current_eseries_api_url: |-
+ {%- set valid_urls = [] %}
+ {%- for result in about_results["results"] if result["failed"] == false -%}
+ {%- if valid_urls.append(result['item']) %}{%- endif %}
+ {%- endfor %}
+ {{ valid_urls[0] | default("") }}
+
+ - name: Set Web Services REST API credentials.
+ set_fact:
+ current_eseries_api_is_proxy: False
+ current_eseries_ssid: "{{ current_eseries_ssid | default('1') }}"
+ current_eseries_api_username: "{{ eseries_system_username | default('admin') }}"
+ current_eseries_api_password: "{{ eseries_system_password }}"
+ current_eseries_validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ when: current_eseries_api_url != ""
+ no_log: true
+ when: eseries_api_url_list is defined and eseries_api_url_list | length > 0
+ tags: always
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/main.yml
new file mode 100644
index 000000000..ba568b99f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/main.yml
@@ -0,0 +1,14 @@
+- name: Configure SANtricity Web Services Proxy's passwords, certificates, and LDAP.
+ import_tasks: proxy_security.yml
+ run_once: true
+
+- name: Build information for Web Services
+ import_tasks: build_info.yml
+ tags:
+ - always
+
+- name: Configure SANtricity Web Services Proxy
+ import_tasks: proxy.yml
+ run_once: true
+ tags:
+ - always
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy.yml
new file mode 100644
index 000000000..965801d2f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy.yml
@@ -0,0 +1,49 @@
+- name: Add storage systems to SANtricity Web Services Proxy
+ na_santricity_proxy_systems:
+ api_url: "{{ item['key'] }}"
+ api_username: "{{ item['value']['proxy_username'] }}"
+ api_password: "{{ item['value']['proxy_password'] }}"
+ validate_certs: "{{ item['value']['proxy_validate_certs'] | default(omit) }}"
+ accept_certificate: "{{ item['value']['proxy_accept_certifications'] | default(omit) }}"
+ subnet_mask: "{{ item['value']['proxy_subnet'] }}"
+ password: "{{ item['value']['proxy_default_password'] | default(omit) }}"
+ tags: "{{ item['value']['proxy_default_system_tags'] | default(omit) }}"
+ systems: "{{ item['value']['proxy_systems'] }}"
+ connection: local
+ loop: "{{ lookup('dict', proxy_systems_info, wantlist=True) }}"
+ no_log: true
+ vars:
+ proxy_systems_info: |-
+ {#- Build a dictionary of all inventoried proxies keyed by their api url #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- if hostvars[array]["current_eseries_api_is_proxy"] %}
+ {%- set array_info = {} %}
+ {%- if "eseries_system_serial" in hostvars[array] or "eseries_system_addresses" in hostvars[array] %}
+ {%- if array_info.update({
+ "ssid": hostvars[array]["current_eseries_ssid"] | default(omit),
+ "password": hostvars[array]["eseries_system_password"] | default(omit),
+ "serial": hostvars[array]["eseries_system_serial"] | default(omit),
+ "addresses": hostvars[array]["eseries_system_addresses"] | default(omit),
+ "tags": hostvars[array]["eseries_system_tags"] | default(omit)}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- if "eseries_proxy_api_url" in hostvars[array] and "eseries_proxy_api_password" in hostvars[array] %}
+ {%- if hostvars[array]["eseries_proxy_api_url"] in systems %}
+ {%- if systems[hostvars[array]["eseries_proxy_api_url"]]["proxy_systems"].append(array_info) %}{%- endif %}
+ {%- else %}
+ {%- if systems.update({hostvars[array]["eseries_proxy_api_url"]: {
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default("admin"),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"] | default(omit),
+ "proxy_subnet": hostvars[array]["eseries_subnet"] | default(omit),
+ "proxy_systems": [array_info],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit),
+ "proxy_accept_certifications": hostvars[array]["eseries_proxy_accept_certifications"] | default(omit),
+ "proxy_default_system_tags": hostvars[array]["eseries_proxy_default_system_tags"] | default(omit),
+ "proxy_default_password": hostvars[array]["eseries_proxy_default_password"] | default(omit)}}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ systems }}
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy_security.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy_security.yml
new file mode 100644
index 000000000..6cfb60246
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_common/tasks/proxy_security.yml
@@ -0,0 +1,241 @@
+- name: Ensure proxy admin password has been set
+ na_santricity_auth:
+ ssid: proxy
+ api_url: "{{ item['key'] }}"
+ api_username: "{{ item['value']['proxy_username'] }}"
+ api_password: "{{ item['value']['current_proxy_password'] }}"
+ validate_certs: "{{ item['value']['proxy_validate_certs'] }}"
+ user: admin
+ password: "{{ item['value']['proxy_password'] }}"
+ minimum_password_length: "{{ item['value']['proxy_minimum_password_length'] }}"
+ connection: local
+ loop: "{{ lookup('dict', proxy_admin, wantlist=True) }}"
+ no_log: true
+ vars:
+ proxy_admin: |-
+ {#- Build a dictionary of all inventoried proxies keyed by their api url #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- if "eseries_proxy_api_url" in hostvars[array] and "eseries_proxy_api_password" in hostvars[array] %}
+ {%- if systems.update({hostvars[array]["eseries_proxy_api_url"]: {
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"],
+ "current_proxy_password": hostvars[array]["eseries_proxy_api_old_password"] | default(hostvars[array]["eseries_proxy_api_password"]),
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit),
+ "proxy_minimum_password_length": hostvars[array]["eseries_proxy_minimum_password_length"] | default(omit)}}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ systems }}
+ tags:
+ - always
+
+- name: Ensure proxy non-admin passwords have been set
+ na_santricity_auth:
+ ssid: proxy
+ api_url: "{{ item['value']['proxy_url'] }}"
+ api_username: "{{ item['value']['proxy_url_username'] }}"
+ api_password: "{{ item['value']['proxy_url_password'] }}"
+ validate_certs: "{{ item['value']['proxy_validate_certs'] }}"
+ user: "{{ item['value']['proxy_username'] }}"
+ password: "{{ item['value']['proxy_password'] }}"
+ connection: local
+ loop: "{{ lookup('dict', proxy_non_admin, wantlist=True) }}"
+ no_log: true
+ vars:
+ proxy_non_admin: |-
+ {#- Build a dictionary of all inventoried proxies keyed by their api url containing non-admin usernames/passwords #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- if "eseries_proxy_api_url" in hostvars[array] and "eseries_proxy_api_password" in hostvars[array] and
+ (("eseries_proxy_monitor_password" in hostvars[array] and hostvars[array]["eseries_proxy_monitor_password"]) or
+ ("eseries_proxy_security_password" in hostvars[array] and hostvars[array]["eseries_proxy_security_password"]) or
+ ("eseries_proxy_storage_password" in hostvars[array] and hostvars[array]["eseries_proxy_storage_password"]) or
+ ("eseries_proxy_support_password" in hostvars[array] and hostvars[array]["eseries_proxy_support_password"])) %}
+ {%- if "eseries_proxy_monitor_password" in hostvars[array] and hostvars[array]["eseries_proxy_monitor_password"] and
+ systems.update({[hostvars[array]["eseries_proxy_api_url"], 'monitor'] | join("-"): {
+ "proxy_url": hostvars[array]["eseries_proxy_api_url"],
+ "proxy_url_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_url_password": hostvars[array]["eseries_proxy_api_password"],
+ "proxy_username": 'monitor',
+ "proxy_password": hostvars[array]["eseries_proxy_monitor_password"],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit)}}) %}
+ {%- endif %}
+ {%- if "eseries_proxy_security_password" in hostvars[array] and hostvars[array]["eseries_proxy_security_password"] and
+ systems.update({[hostvars[array]["eseries_proxy_api_url"], 'security'] | join("-"): {
+ "proxy_url": hostvars[array]["eseries_proxy_api_url"],
+ "proxy_url_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_url_password": hostvars[array]["eseries_proxy_api_password"],
+ "proxy_username": 'security',
+ "proxy_password": hostvars[array]["eseries_proxy_security_password"],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit)}}) %}
+ {%- endif %}
+ {%- if "eseries_proxy_storage_password" in hostvars[array] and hostvars[array]["eseries_proxy_storage_password"] and
+ systems.update({[hostvars[array]["eseries_proxy_api_url"], 'storage'] | join("-"): {
+ "proxy_url": hostvars[array]["eseries_proxy_api_url"],
+ "proxy_url_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_url_password": hostvars[array]["eseries_proxy_api_password"],
+ "proxy_username": 'storage',
+ "proxy_password": hostvars[array]["eseries_proxy_storage_password"],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit)}}) %}
+ {%- endif %}
+ {%- if "eseries_proxy_support_password" in hostvars[array] and hostvars[array]["eseries_proxy_support_password"] and
+ systems.update({[hostvars[array]["eseries_proxy_api_url"], 'support'] | join("-"): {
+ "proxy_url": hostvars[array]["eseries_proxy_api_url"],
+ "proxy_url_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_url_password": hostvars[array]["eseries_proxy_api_password"],
+ "proxy_username": 'support',
+ "proxy_password": hostvars[array]["eseries_proxy_support_password"],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit)}}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ systems }}
+
+- name: Ensure proxy client certificates are installed
+ na_santricity_client_certificate:
+ ssid: proxy
+ api_url: "{{ item['key'] }}"
+ api_username: "{{ item['value']['proxy_username'] }}"
+ api_password: "{{ item['value']['proxy_password'] }}"
+ validate_certs: "{{ item['value']['proxy_validate_certs'] }}"
+ certificates: "{{ item['value']['certificates'] }}"
+ connection: local
+ loop: "{{ lookup('dict', proxy_client_certificates, wantlist=True) }}"
+ when: item['value']['certificates'] | length > 0
+ no_log: true
+ vars:
+ proxy_client_certificates: |-
+ {#- Build a dictionary of all inventoried proxies keyed by their api url #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- if "eseries_proxy_api_url" in hostvars[array] and "eseries_proxy_api_password" in hostvars[array] %}
+ {%- set certs = [] -%}
+
+ {#- Add common proxy client certificates -#}
+ {%- if "eseries_proxy_client_certificate_common_certificates" in (hostvars[array].keys() | list) -%}
+ {%- if hostvars[array]["eseries_proxy_client_certificate_common_certificates"] is string -%}
+ {%- if certs.append(hostvars[array]["eseries_proxy_client_certificate_common_certificates"]) -%}{%- endif -%}
+ {%- elif hostvars[array]["eseries_proxy_client_certificate_common_certificates"] is iterable -%}
+ {%- if certs.extend(hostvars[array]["eseries_proxy_client_certificate_common_certificates"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {#- Add proxy specific client certificates -#}
+ {%- if "eseries_proxy_client_certificate_certificates" in (hostvars[array].keys() | list) -%}
+ {%- if hostvars[array]["eseries_proxy_client_certificate_certificates"] is string -%}
+ {%- if hostvars[array]["eseries_proxy_client_certificate_certificates"] not in certs -%}
+ {%- if certs.append(hostvars[array]["eseries_proxy_client_certificate_certificates"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- elif hostvars[array]["eseries_proxy_client_certificate_certificates"] is iterable -%}
+ {%- for client_cert in hostvars[array]["eseries_proxy_client_certificate_certificates"] if client_cert not in certs -%}
+ {%- if certs.append(client_cert) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {%- if systems.update({hostvars[array]["eseries_proxy_api_url"]: {
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit),
+ "certificates": certs}}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ systems }}
+
+- name: Ensure proxy server certificates are installed
+ na_santricity_server_certificate:
+ ssid: proxy
+ api_url: "{{ item['key'] }}"
+ api_username: "{{ item['value']['proxy_username'] }}"
+ api_password: "{{ item['value']['proxy_password'] }}"
+ validate_certs: "{{ item['value']['proxy_validate_certs'] }}"
+ certificates: "{{ item['value']['certificates'] }}"
+ passphrase: "{{ item['value']['passphrase'] }}"
+ connection: local
+ loop: "{{ lookup('dict', proxy_server_certificates, wantlist=True) }}"
+ when: item['value']['certificates'] | length > 0
+ no_log: true
+ vars:
+ proxy_server_certificates: |-
+ {#- Build a dictionary of all inventoried proxies keyed by their api url #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- if "eseries_proxy_api_url" in hostvars[array] and "eseries_proxy_api_password" in hostvars[array] %}
+ {%- set certs = [] -%}
+
+ {#- Add common proxy server certificates -#}
+ {%- if "eseries_proxy_server_certificate_common_certificates" in (hostvars[array].keys() | list) -%}
+ {%- if hostvars[array]["eseries_proxy_server_certificate_common_certificates"] is string -%}
+ {%- if certs.append(hostvars[array]["eseries_proxy_server_certificate_common_certificates"]) -%}{%- endif -%}
+ {%- elif hostvars[array]["eseries_proxy_server_certificate_common_certificates"] is iterable -%}
+ {%- if certs.extend(hostvars[array]["eseries_proxy_server_certificate_common_certificates"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {#- Add proxy specific server certificates -#}
+ {%- if "eseries_proxy_server_certificate_certificates" in (hostvars[array].keys() | list) -%}
+ {%- if hostvars[array]["eseries_proxy_server_certificate_certificates"] is string -%}
+ {%- if hostvars[array]["eseries_proxy_server_certificate_certificates"] not in certs -%}
+ {%- if certs.append(hostvars[array]["eseries_proxy_server_certificate_certificates"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- elif hostvars[array]["eseries_proxy_server_certificate_certificates"] is iterable -%}
+ {%- for client_cert in hostvars[array]["eseries_proxy_server_certificate_certificates"] if client_cert not in certs -%}
+ {%- if certs.append(client_cert) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {%- if systems.update({hostvars[array]["eseries_proxy_api_url"]: {
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"],
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit),
+ "certificates": certs,
+ "passphrase": hostvars[array]["eseries_proxy_server_certificate_passphrase"] | default(hostvars[array]["eseries_proxy_server_certificate_common_passphrase"] | default(omit))}}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ systems }}
+
+- name: Ensure proxy LDAP has been configured
+ na_santricity_ldap:
+ ssid: proxy
+ api_url: "{{ item['key'] }}"
+ api_username: "{{ item['value']['proxy_username'] }}"
+ api_password: "{{ item['value']['current_password'] | default(item['value']['proxy_password']) }}"
+ validate_certs: "{{ item['value']['proxy_validate_certs'] }}"
+ state: "{{ item['value']['ldap_state'] }}"
+ identifier: "{{ item['value']['ldap_identifier'] | default(omit) }}"
+ server_url: "{{ item['value']['ldap_server'] | default(omit) }}"
+ bind_user: "{{ item['value']['ldap_bind_username'] | default(omit) }}"
+ bind_password: "{{ item['value']['ldap_bind_password'] | default(omit) }}"
+ search_base: "{{ item['value']['ldap_search_base'] | default(omit) }}"
+ user_attribute: "{{ item['value']['ldap_user_attribute'] | default(omit) }}"
+ role_mappings: "{{ item['value']['ldap_role_mappings'] | default(omit) }}"
+ ignore_errors: true
+ connection: local
+ loop: "{{ lookup('dict', proxy_admin, wantlist=True) }}"
+ vars:
+ proxy_admin: |-
+ {#- Build a dictionary of all inventoried proxies keyed by their api url #}
+ {%- set systems = {} %}
+ {%- for array in ansible_play_hosts_all %}
+ {%- if "eseries_proxy_api_url" in hostvars[array] and "eseries_proxy_api_password" in hostvars[array] and "eseries_proxy_ldap_state" in hostvars[array] %}
+ {%- if systems.update({hostvars[array]["eseries_proxy_api_url"]: {
+ "proxy_username": hostvars[array]["eseries_proxy_api_username"] | default('admin'),
+ "proxy_password": hostvars[array]["eseries_proxy_api_password"],
+ "current_proxy_password": hostvars[array]["eseries_proxy_current_api_password"] | default(omit),
+ "proxy_validate_certs": hostvars[array]["eseries_validate_certs"] | default(omit),
+ "ldap_state": hostvars[array]["eseries_proxy_ldap_state"],
+ "ldap_identifier": hostvars[array]["eseries_proxy_ldap_identifier"] | default(omit),
+ "ldap_server": hostvars[array]["eseries_proxy_ldap_server"] | default(omit),
+ "ldap_bind_username": hostvars[array]["eseries_proxy_ldap_bind_username"] | default(omit),
+ "ldap_bind_password": hostvars[array]["eseries_proxy_ldap_bind_password"] | default(omit),
+ "ldap_search_base": hostvars[array]["eseries_proxy_ldap_search_base"] | default(omit),
+ "ldap_user_attribute": hostvars[array]["eseries_proxy_ldap_user_attribute"] | default(omit),
+ "ldap_role_mappings": hostvars[array]["eseries_proxy_ldap_role_mappings"] | default(omit)}}) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ systems }}
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/README.md b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/README.md
new file mode 100644
index 000000000..011b98565
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/README.md
@@ -0,0 +1,443 @@
+nar_santricity_host
+=========
+ Configures storage pools, volumes, hosts, host groups, and port interfaces for NetApp E-Series storage arrays
+ using the iSCSI, FC, SAS, IB, and NVMe protocols.
+
+Requirements
+------------
+ - NetApp E-Series E2800 platform or newer, or NetApp E-Series SANtricity Web Services Proxy configured for older E-Series storage arrays.
+
+Tested Ansible Versions
+-----------------------
+ - Ansible 5.x (ansible-core 2.12)
+
+Example Playbook
+----------------
+ - hosts: eseries_storage_systems
+ gather_facts: false
+ collections:
+ - netapp_eseries.santricity
+ tasks:
+ - name: Ensure NetApp E-Series storage system is properly configured
+ import_role:
+ name: nar_santricity_host
+
+Example Storage System Inventory File (Discover storage system with proxy)
+-------------------------------------
+ eseries_system_serial: "012345678901" # Be sure to quote if the serial is all numbers and begins with zero.
+ eseries_system_password: admin_password
+ eseries_proxy_api_url: https://192.168.1.100:8443/devmgr/v2/
+ eseries_proxy_api_password: admin_password
+ eseries_subnet: 192.168.1.0/24
+ eseries_prefer_embedded: true
+ eseries_validate_certs: false
+
+ eseries_initiator_protocol: iscsi
+
+ # Controller port definitions
+ eseries_controller_iscsi_port_config_method: static
+ eseries_controller_iscsi_port_subnet_mask: 255.255.255.0
+ eseries_controller_iscsi_port:
+ controller_a:
+ - address: 192.168.2.100
+ - address: 192.168.2.110
+ controller_b:
+ - address: 192.168.3.100
+ - address: 192.168.3.110
+
+ # Storage pool and volume configuration
+ eseries_storage_pool_configuration:
+ - name: pool[1-2]
+ raid_level: raid6
+ criteria_drive_count: 10
+ volumes:
+ - name: "[pool]_volume[A-C]"
+ host: server_group
+ size: 4096
+
+Example Storage System Inventory File (Without storage system discovery)
+-------------------------------------
+ eseries_system_api_url: https://192.168.1.200:8443/devmgr/v2/
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+
+ (...) # Same as the previous example
+
+Yet Another Example System Inventory File
+-----------------------------------------
+ eseries_system_api_url: https://192.168.1.200:8443/devmgr/v2/
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+
+ eseries_initiator_protocol: nvme_ib
+ eseries_controller_nvme_ib_port:
+ controller_a:
+ - 192.168.1.100
+ - 192.168.1.110
+ controller_b:
+ - 192.168.2.100
+ - 192.168.2.110
+
+ eseries_storage_pool_configuration:
+ - name: vg[1-2]
+ raid_level: raid6
+ criteria_drive_count: 12
+ criteria_volume_count: 4
+ criteria_reserve_free_capacity_pct: 7
+ common_volume_host: server_group
+
+Role Variables
+--------------
+**Note that when values are specified below, they indicate the default value.**
+
+ # Web Services Embedded information
+ eseries_subnet: # Network subnet to search for the storage system specified in CIDR form. Example: 192.168.1.0/24
+ eseries_system_serial: # Storage system serial number. Be sure to quote if the serial is all numbers and begins with zero. (This is located
+ # on a label at the top-left towards the front on the device)
+ eseries_system_addresses: # Storage system management IP addresses. Only required when eseries_system_serial or eseries_system_api_url are not
+ # defined. When not specified, addresses will be populated with eseries_management_interfaces controller addresses.
+ eseries_system_api_url: # URL for the storage system's embedded Web Services REST API. Example: https://192.168.10.100/devmgr/v2
+ eseries_system_username: admin # Username for the storage system's embedded Web Services REST API
+ eseries_system_password: # Password for the storage system's embedded Web Services REST API. When the admin password has not been set,
+ # eseries_system_password will be used to set it.
+ eseries_proxy_ssid: # Arbitrary string for the proxy to represent the storage system. eseries_system_serial will be used when not defined.
+ eseries_template_api_url: # Template for the web services api url. Default: https://0.0.0.0:8443/devmgr/v2/
+ eseries_prefer_embedded: false # Overrides the default behavior of using the Web Services Proxy when eseries_proxy_api_url is defined. This will only affect storage systems that have Embedded Web Services.
+ eseries_validate_certs: true # Indicates whether SSL certificates should be verified. Used for both embedded and proxy. Choices: true, false
+
+ # Web Services Proxy information
+ Note: eseries_proxy_* variables are required to discover storage systems prior to SANtricity OS version 11.60.2.
+ eseries_proxy_api_url: # URL for the SANtricity Web Services Proxy REST API. Example: https://192.168.10.100/devmgr/v2
+ eseries_proxy_api_username: # Username for the SANtricity Web Services Proxy REST API.
+ eseries_proxy_api_password: # Password for the SANtricity Web Services Proxy REST API. When the admin password has not been set,
+ # eseries_proxy_api_password will be used to set it.
+
+ # Controller iSCSI Interface Port Default Policy Specifications
+ eseries_controller_iscsi_port_state: enabled # Generally specifies whether a controller port definition should be applied. Choices: enabled, disabled
+ eseries_controller_iscsi_port_config_method: dhcp # General port configuration method definition for both controllers. Choices: static, dhcp
+ eseries_controller_iscsi_port_gateway: # General port IPv4 gateway for both controllers.
+ eseries_controller_iscsi_port_subnet_mask: # General port IPv4 subnet mask for both controllers.
+ eseries_controller_iscsi_port_mtu: 9000 # General port maximum transfer units (MTU) for both controllers. Any value greater than 1500 (bytes).
+ eseries_controller_iscsi_port_speed: # General port speed for both controllers.
+ eseries_controller_iscsi_port:
+ controller_a: # Ordered list of controller A channel definition.
+ - state: # Whether the port should be enabled. Choices: enabled, disabled
+ config_method: # Port configuration method Choices: static, dhcp
+ address: # Port IPv4 address
+ gateway: # Port IPv4 gateway
+ subnet_mask: # Port IPv4 subnet_mask
+ mtu: # Port IPv4 mtu
+ speed: # Port speed
+ controller_b: # Ordered list of controller B channel definition.
+ - (...) # Same as controller A but for controller B
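+
+    Example (illustrative addresses) of statically configuring the first iSCSI port on each controller:
+
+        eseries_controller_iscsi_port:
+          controller_a:
+            - config_method: static
+              address: 192.168.1.100
+              subnet_mask: 255.255.255.0
+              mtu: 9000
+          controller_b:
+            - config_method: static
+              address: 192.168.2.100
+              subnet_mask: 255.255.255.0
+              mtu: 9000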
+
+ # Controller InfiniBand iSER Interface Channel
+ eseries_controller_ib_iser_port:
+ controller_a: # Ordered list of controller A channel address definition.
+ - # Port IPv4 address for channel 1
+ - (...) # So on and so forth
+ controller_b: # Ordered list of controller B channel address definition.
+
+ # Controller NVMe over InfiniBand Interface Channel
+ eseries_controller_nvme_ib_port:
+ controller_a: # Ordered list of controller A channel address definition.
+ - # Port IPv4 address for channel 1
+ - (...) # So on and so forth
+ controller_b: # Ordered list of controller B channel address definition.
+
+ # Controller NVMe RoCE Interface Port Default Policy Specifications
+ eseries_controller_nvme_roce_port_state: enabled # Generally specifies whether a controller port definition should be applied Choices: enabled, disabled
+ eseries_controller_nvme_roce_port_config_method: dhcp # General port configuration method definition for both controllers. Choices: static, dhcp
+ eseries_controller_nvme_roce_port_gateway: # General port IPv4 gateway for both controllers.
+ eseries_controller_nvme_roce_port_subnet_mask: # General port IPv4 subnet mask for both controllers.
+ eseries_controller_nvme_roce_port_mtu: 4200 # General port maximum transfer units (MTU). Any value greater than 1500 (bytes).
+ eseries_controller_nvme_roce_port_speed: auto # General interface speed. Value must be a supported speed or auto for automatically negotiating the speed with the port.
+ eseries_controller_nvme_roce_port:
+ controller_a: # Ordered list of controller A channel definition.
+ - state: # Whether the port should be enabled.
+ config_method: # Port configuration method Choices: static, dhcp
+ address: # Port IPv4 address
+ subnet_mask: # Port IPv4 subnet_mask
+ gateway: # Port IPv4 gateway
+ mtu: # Port IPv4 mtu
+ speed: # Port IPv4 speed
+ controller_b: # Ordered list of controller B channel definition.
+ - (...) # Same as controller A but for controller B
+
+ # Target discovery specifications
+    Note: add the following to the ansible-playbook command to update the chap secret: --extra-vars "eseries_target_chap_secret_update=True"
+ eseries_target_name: # iSCSI target name that will be seen by the initiator
+ eseries_target_ping: True # Enables ICMP ping response from the configured iSCSI ports (boolean)
+ eseries_target_unnamed_discovery: True # Whether the iSCSI target iqn should be returned when an initiator performs a discovery session.
+ eseries_target_chap_secret: # iSCSI chap secret. When left blank, the chap secret will be removed from the storage system.
+    eseries_target_chap_secret_update: False   # DO NOT REMOVE! Since na_santricity_iscsi_target cannot compare the chap secret with the current one and always
+                                                # returns changed=True, this flag is used to force the module to update the chap secret. It is preferable to
+                                                # leave this value False and to add --extra-vars "eseries_target_chap_secret_update=True".
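+
+    Example (the secret below is a placeholder) of defining a chap secret and forcing the update from the command line:
+
+        eseries_target_name: eseries_array_01
+        eseries_target_chap_secret: "exampleChapSecret123"
+
+        ansible-playbook -i inventory.yml playbook.yml --extra-vars "eseries_target_chap_secret_update=True"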
+
+ # Storage Pool Default Policy Specifications
+ eseries_storage_pool_state: present # Default storage pool state. Choices: present, absent
+ eseries_storage_pool_raid_level: raidDiskPool # Default volume raid level. Choices: raid0, raid1, raid5, raid6, raidDiskPool
+ eseries_storage_pool_secure_pool: false # Default for storage pool drive security. This flag will enable the security at rest feature. There
+ # must be sufficient FDE or FIPS security capable drives. Choices: true, false
+ eseries_storage_pool_criteria_drive_count: # Default storage pool drive count.
+ eseries_storage_pool_reserve_drive_count: # Default reserve drive count for drive reconstruction for storage pools using dynamic disk pool and
+ # the raid level must be set for raidDiskPool.
+ eseries_storage_pool_criteria_min_usable_capacity: # Default minimum required capacity for storage pools.
+ eseries_storage_pool_criteria_drive_type: # Default drive type for storage pools. Choices: hdd, ssd
+ eseries_storage_pool_criteria_drive_interface_type: # Default interface type to use when selecting drives for the storage pool.
+ # Choices: scsi, fibre, sata, pata, fibre520b, sas, sas4k, nvme4k
+ eseries_storage_pool_criteria_size_unit: gb # Default unit size for all storage pool related sizing.
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb
+ eseries_storage_pool_criteria_drive_min_size: # Default minimum drive size for storage pools.
+ eseries_storage_pool_criteria_drive_max_size: # Default maximum drive size for storage pools.
+ eseries_storage_pool_criteria_drive_require_da: # Default for whether storage pools are required to have data assurance (DA) compatible drives.
+ # Choices: true, false
+ eseries_storage_pool_criteria_drive_require_fde: # Default for whether storage pools are required to have drive security compatible drives.
+ # Choices: true, false
+ eseries_storage_pool_remove_volumes: # Default policy for deleting volumes prior to removing storage pools.
+    eseries_storage_pool_erase_secured_drives:            # Default policy for erasing the content of drives during create and delete storage pool operations.
+ # Choices: true, false
+ eseries_storage_pool_ddp_critical_threshold_pct: # Default policy for dynamic disk pool alert critical threshold.
+ eseries_storage_pool_ddp_warning_threshold_pct: # Default policy for dynamic disk pool alert warning threshold.
+
+ # Volume Default Policy Specifications
+ eseries_volume_state: present # Default volume state. Choices: present, absent
+ eseries_volume_size_unit: gb # Default unit size for all volume sizing options.
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb, pct
+ eseries_volume_size: # Default volume size or the presented size for thinly provisioned volumes.
+ eseries_volume_data_assurance_enabled: # Default for whether data assurance(DA) is required to be enabled.
+ eseries_volume_segment_size_kb: # Default segment size measured in kib.
+ eseries_volume_read_cache_enable: # Default for read caching which will cache all read requests.
+ eseries_volume_read_ahead_enable: # Default for read ahead caching; this is good for sequential workloads to cache subsequent blocks.
+ eseries_volume_write_cache_enable: # Default for write caching which will cache all writes.
+ eseries_volume_write_cache_mirror_enable: # Default for write cache mirroring which mirrors writes to both controller's cache.
+ eseries_volume_cache_without_batteries: # Default for allowing caching when batteries are not present.
+ eseries_volume_thin_provision: # Default for whether volumes should be thinly provisioned.
+ eseries_volume_thin_volume_repo_size: # Default for actually allocated space for thinly provisioned volumes.
+ eseries_volume_thin_volume_max_repo_size: # Default for the maximum allocated space allowed for thinly provisioned volumes.
+ eseries_volume_thin_volume_expansion_policy: # Default thin volume expansion policy. Choices: automatic, manual
+ eseries_volume_thin_volume_growth_alert_threshold: # Default thin volume growth alert threshold; this is the threshold for when the thin volume expansion
+ # policy will be enacted. Allowable values are between and including 10% and 99%
+ eseries_volume_ssd_cache_enabled: # Default for ssd cache which will enable the volume to use an existing SSD cache on the storage array.
+ eseries_volume_host: # Default host for all volumes; the value can be any host from the Ansible inventory. Any initiator may be
+ # used whether connected or not since the storage array does not require connectivity in order to create
+ # host objects.
+ eseries_volume_workload_name: # Default workload tag name
+ eseries_volume_workload_metadata: # Default workload metadata
+ eseries_volume_volume_metadata: # Default volume_metadata
+    eseries_volume_owning_controller:                  # Default preferred owning controller
+    eseries_volume_allow_expansion: false              # Default for whether volume expansions are permitted
+    eseries_volume_wait_for_initialization: false      # Default for whether volume creation will wait for initialization to complete
+
+ # Storage Pool-Volume Mapping Default Policy Specifications
+ eseries_lun_mapping_state: present # Generally specifies whether a LUN mapping should be present. This is useful when adding a default host for all
+ # volumes. Choices: present, absent
+    eseries_lun_mapping_host:            # Default host for all volumes not specifically given a host either in common_volume_configuration or in
+ # eseries_storage_pool_configuration.
+
+ # Storage Pool-Volume Default Policy Specifications
+    Name schemes: Storage pool and volume names can be used to specify a naming scheme to produce a list of storage pools and volumes. Schemes are defined by
+                  brackets and can be used to specify a range of lowercase letters, uppercase letters, range of single digit numbers, any top-level inventory
+                  variables, and the current defined storage pool (volume only). For example, name: vg[1-2] produces the storage pools vg1 and vg2, and a volume named volume[A-C] produces volumeA, volumeB, and volumeC in each pool.
+ eseries_storage_pool_configuration:
+ - name: # Name or name scheme (see above) for the storage pool.
+ state: # Specifies whether the storage pool should exist (present, absent). When removing an existing storage array all of the
+ # volumes must be defined with state=absent.
+        raid_level:                           # Volume group raid level. Choices: raid0, raid1, raid5, raid6, raidDiskPool (Default: raidDiskPool)
+ secure_pool: # Default for storage pool drive security. This flag will enable the security at rest feature. There must be sufficient FDE
+ # or FIPS security capable drives. Choices: true, false
+ criteria_drive_count: # Default storage pool drive count.
+ criteria_volume_count: # Number of equally sized volumes to create. All available storage pool space will be used. The option will be ignored
+ # if volumes is defined.
+        criteria_reserve_free_capacity_pct:   # Percent of reserve free space capacity to leave when creating the criteria_volume_count volumes.
+        common_volume_host:                   # Host or host group to which the criteria_volume_count volumes should be mapped.
+ reserve_drive_count: # Default reserve drive count for drive reconstruction for storage pools using dynamic disk pool and the raid level must be
+ # set for raidDiskPool.
+ criteria_size_unit: # Unit size for all storage pool related sizing. Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb, pct
+ criteria_min_usable_capacity: # Minimum required capacity for storage pools.
+ criteria_drive_type: # Drive type for storage pools. Choices: hdd, ssd
+        criteria_drive_interface_type:        # Interface type to use when selecting drives for the storage pool. Choices: scsi, fibre, sata, pata, fibre520b, sas, sas4k, nvme4k
+ criteria_drive_min_size: # Minimum drive size for storage pools.
+ criteria_drive_max_size: # Maximum drive size for storage pools.
+ criteria_drive_require_da: # Whether storage pools are required to have data assurance (DA) compatible drives. Choices: true, false
+ criteria_drive_require_fde: # Whether storage pools are required to have drive security compatible drives. Choices: true, false
+ remove_volumes: # Policy for deleting volumes prior to removing storage pools.
+        erase_secured_drives:                 # Policy for erasing the content of drives during create and delete storage pool operations. Choices: true, false
+ common_volume_configuration: # Any option that can be specified at the volume level can be generalized here at the storage pool level. This is useful when
+ # all volumes share common configuration definitions.
+        volumes:                              # List of volumes associated with the storage pool.
+ - state: # Specifies whether the volume should exist (present, absent)
+ name: # (required) Name or name scheme (see above) for the volume(s) to be created in the storage pool(s)
+            host:                                 # Host or host group to which the volume should be mapped.
+ host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or the storage system default host type is incorrect. Common definitions below:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
+ size: # Size of the volume or presented size of the thinly provisioned volume.
+ size_unit: # Unit size for the size, thin_volume_repo_size, and thin_volume_max_repo_size
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb, pct
+ segment_size_kb: # Indicates the amount of data stored on a drive before moving on to the next drive in the volume group. Does not apply to pool volumes.
+ thin_provision: # Whether volumes should be thinly provisioned.
+ thin_volume_repo_size: # Actually allocated space for thinly provisioned volumes.
+ thin_volume_max_repo_size: # Maximum allocated space allowed for thinly provisioned volumes.
+ thin_volume_expansion_policy: # Thin volume expansion policy. Choices: automatic, manual
+ thin_volume_growth_alert_threshold: # Thin volume growth alert threshold; this is the threshold for when the thin volume expansion
+ # policy will be enacted. Allowable values are between and including 10% and 99%
+ ssd_cache_enabled: # Enables ssd cache which will enable the volume to use an existing SSD cache on the storage array.
+ data_assurance_enabled: # Enables whether data assurance(DA) is required to be enabled.
+ read_cache_enable: # Enables read caching which will cache all read requests.
+ read_ahead_enable: # Enables read ahead caching; this is good for sequential workloads to cache subsequent blocks.
+ write_cache_enable: # Enables write caching which will cache all writes.
+ write_cache_mirror_enable: # Enables write cache mirroring which mirrors writes to both controller's cache.
+ workload_name: # Name of the volume's workload. This can be defined using the metadata option or, if already defined, specify one already
+ # created on the storage array.
+ workload_metadata: # Dictionary containing arbitrary entries normally used for defining the volume(s) workload.
+            volume_metadata:                      # Dictionary containing arbitrary entries used to define information about the volume itself.
+ # Note: format_type, format_options[0-9]?, mount_directory, mount_options[0-9]? are used by netapp_eseries.host.mount role to format and mount volumes.
+ allow_expansion: # Whether volume expansions are permitted
+            wait_for_initialization:              # Whether volume creation will wait for initialization to complete
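+
+    Example (names and sizes below are illustrative) of a storage pool with mapped volumes:
+
+        eseries_storage_pool_configuration:
+          - name: pool_data
+            raid_level: raidDiskPool
+            criteria_drive_count: 24
+            common_volume_configuration:
+              host: servers          # applies to every volume below unless overridden
+            volumes:
+              - name: data_[1-4]     # expands to data_1 through data_4
+                size: 512
+                size_unit: gb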
+
+ # Snapshot Consistency Group Default Policy Specifications
+ eseries_snapshot_remove_unspecified: # Whether to remove any snapshot group or view that is not specified (Default: false).
+ eseries_snapshot_groups_maximum_snapshots: # Default maximum point-in-time snapshot images (Default: 32).
+ eseries_snapshot_groups_reserve_capacity_pct: # Default reserve capacity percentage (Default: 40)
+ eseries_snapshot_groups_preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+ eseries_snapshot_groups_reserve_capacity_full_policy: # Default full reserve capacity policy (Default: purge). Choices: [purge, reject]
+ eseries_snapshot_groups_alert_threshold_pct: # Default reserve capacity percentage full to alert administrators (Default 75).
+ eseries_snapshot_groups:
+ - name: # Name of snapshot consistency group.
+ maximum_snapshots: # Maximum allowed snapshot point-in-time images for consistency group (Default: 32).
+ reserve_capacity_pct: # Reserve capacity measured as a percentage of the base volume (Default: 40). Reserve capacity can be expanded
+                                          # and trimmed; however, the trim operation requires there be no base volume snapshot images in the group.
+ reserve_capacity_full_policy: # Policy to implement when reserve capacity is full (Default: purge). Choices [purge, reject]
+ alert_threshold_pct: # Reserve capacity full alert threshold for storage system administrators (Default: 75).
+ rollback_priority: # Storage system priority for base volume rollback (Default: medium). Choices [lowest, low, medium, high, highest]
+ volumes: # Information for each volume in the consistency group.
+ - volume: # Base volume name
+ reserve_capacity_pct: # Reserve capacity measured as a percentage of the base volume (Default: 40). Reserve capacity can be expanded
+                                          # and trimmed; however, the trim operation requires there be no base volume snapshot images in the group.
+ preferred_reserve_storage_pool: # Preferred reserve capacity storage pool or volume group. This will default to the base volume's
+ # storage pool or volume group. The reserve capacity volume cannot be changed once created.
+ - (...)
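+
+    Example (names below are illustrative) of a snapshot consistency group covering two base volumes:
+
+        eseries_snapshot_groups:
+          - name: nightly_group
+            maximum_snapshots: 32
+            reserve_capacity_pct: 40
+            volumes:
+              - volume: data_1
+              - volume: data_2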
+
+ # Snapshot Consistency Group View Default Policy Specifications
+ eseries_snapshot_views_host: # Default host or host group to map all snapshot volumes.
+ eseries_snapshot_views_reserve_capacity_pct: # Default reserve capacity percentage (Default: 40)
+ eseries_snapshot_views_preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+ eseries_snapshot_views_alert_threshold_pct: # Default reserve capacity percentage full to alert administrators (Default 75).
+ eseries_snapshot_views_writable: # Default for whether to make snapshot volumes writable.
+ eseries_snapshot_views_validate: # Default for whether to validate snapshot volumes after creation.
+ eseries_snapshot_views:
+ - volume: # Consistency group's snapshot view's name.
+ group_name: # Snapshot consistency group's name.
+        pit_name:                       # Point-in-time snapshot images group name. (Only available when specified using the Ansible na_santricity modules, directly or via the role.)
+        pit_timestamp:                  # Point-in-time snapshot images group timestamp, in the form YYYY-MM-DD HH:MM:SS (AM|PM); hours, minutes, seconds, and day-period are optional.
+ host: # Host or host group to map snapshot volumes.
+ writable: # Whether snapshot volume of base volume images should be writable.
+ validate: # Whether snapshot volume should be validated which includes both a media scan and parity validation.
+ reserve_capacity_pct: # Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). Only used when snapshot volume is writable.
+ preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+ alert_threshold: # Reserve capacity percentage full to alert administrators
+ volumes: # (Optional) Select subset of volumes within the snapshot consistency group.
+ - name: # Name of volume within consistency group.
+ host: # Host or host group to map snapshot volumes.
+ lun: # Logical unit number (LUN) mapping for the host or host group.
+ writable: # Whether snapshot volume of base volume images should be writable.
+ validate: # Whether snapshot volume should be validated which includes both a media scan and parity validation.
+ reserve_capacity_pct: # Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). Only used when snapshot volume is writable.
+ preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+
+ # Snapshot Consistency Group Rollback Default Policy Specifications
+ eseries_snapshot_rollback_priority: medium # Default point-in-time rollback priority (Default: medium). Choices [lowest, low, medium, high, highest]
+ eseries_snapshot_rollback_backup: true # Default whether snapshot should be taken prior to rolling back base volumes (Default: true).
+ eseries_snapshot_rollbacks:
+ - group_name: # Snapshot consistency group's name.
+        pit_name:                       # Point-in-time snapshot images group name. (Only available when specified using the Ansible na_santricity modules, directly or via the role.)
+        pit_timestamp:                  # Point-in-time snapshot images group timestamp, in the form YYYY-MM-DD HH:MM:SS (AM|PM); hours, minutes, seconds, and day-period are optional.
+ rollback_priority: # Storage system priority for base volume rollback (Default: medium). Choices [lowest, low, medium, high, highest]
+ rollback_backup: # Whether to create point-in-time snapshot images of the consistency group prior to rollback.
+ volumes:
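+
+    Example (group name and timestamp below are illustrative) of rolling a consistency group back to a point-in-time image:
+
+        eseries_snapshot_rollbacks:
+          - group_name: nightly_group
+            pit_timestamp: "2024-06-01 01:00:00"
+            rollback_backup: true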
+
+ # Initiator-Target Protocol Variable Defaults
+    Note that the following commands need to produce a unique, line-separated list of the IQNs or WWNs of the interfaces used. Override as necessary.
+ eseries_initiator_protocol: fc # This variable defines which protocol the storage array will use. Choices: fc, iscsi, sas, ib_iser, ib_srp, nvme_ib, nvme_fc, nvme_roce
+ eseries_initiator_command:
+ fc:
+ linux: "cat /sys/class/fc_host/host*/port_name | sort | uniq"
+ windows: "(Get-InitiatorPort | Where-Object -P ConnectionType -EQ 'Fibre Channel' | Select-Object -Property PortAddress |
+ Format-Table -AutoSize -HideTableHeaders | Out-String).trim()"
+ iscsi:
+ linux: "grep -o iqn.* /etc/iscsi/initiatorname.iscsi"
+ windows: "(get-initiatorPort | select-object -property nodeaddress | sort-object | get-unique | ft -autoSize | out-string -stream |
+ select-string iqn | out-string).trim()"
+ sas:
+ # NetApp IMT for SAS attached E-Series SAN hosts recommends adding all possible SAS addresses with the base address
+ # starting at 0, and the last address ending in 3 for single port HBAs, or 7 for dual port HBAs. Since determining
+        # single vs. dual port HBAs adds complexity, we always add all 8 possible permutations of the SAS address.
+ linux: "cat /sys/class/sas_host/host*/device/scsi_host/*/host_sas_address | sort | uniq"
+ windows: "(Get-InitiatorPort | Where-Object -P ConnectionType -EQ 'SAS' | Select-Object -Property PortAddress | Format-Table -AutoSize -HideTableHeaders | Out-String).trim()"
+ ib_iser:
+ linux: "grep -o iqn.* /etc/iscsi/initiatorname.iscsi"
+ windows: "" # add windows command for determining host iqn address(es)
+ ib_srp:
+ linux: "for fp in /sys/class/infiniband/*/ports/*/gids/*; do out=`cat $fp | tr -d :`; port=`expr substr $out 17 32`; if [ $port != 0000000000000000 ]; then echo 0x$port; fi; done | sort | uniq"
+ windows: "" # add windows command for determining host guid
+ nvme_ib:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+ nvme_fc:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+ nvme_roce:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+
+ # Manual host definitions, Linux and Windows systems can be automatically populated based on host mappings found in eseries_storage_pool_configuration
+ eseries_host_force_port: true # Default for whether ports are to be allowed to be re-assigned (boolean)
+ eseries_host_remove_unused_hostgroup: true # Forces any unused groups to be removed
+ eseries_host_object:
+ - name: # Host label as referenced by the storage array.
+        state:             # Specifies whether the host definition should exist. Choices: present, absent
+ ports: # List of port definitions
+          - type:          # Port protocol definition (iscsi, fc, sas, ib, nvme). Note that you should use 'iscsi' prior to SANtricity version 11.60 for IB iSER.
+ label: # Arbitrary port label
+ port: # Port initiator (iqn, wwn, etc)
+ group: # Host's host group
+ host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or the storage system default host type is incorrect. Common definitions below:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
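+
+    Example (the host name and WWPN below are placeholders) of a manually defined Fibre Channel host:
+
+        eseries_host_object:
+          - name: server01
+            state: present
+            host_type: Linux DM-MP          # one of the host type names listed above
+            group: servers
+            ports:
+              - type: fc
+                label: server01_fc_1
+                port: "10:00:00:00:c9:aa:bb:cc"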
+
+License
+-------
+ BSD-3-Clause
+
+Author Information
+------------------
+ Nathan Swartz (@ndswartz)
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/defaults/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/defaults/main.yml
new file mode 100644
index 000000000..da9db4c7d
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/defaults/main.yml
@@ -0,0 +1,367 @@
+# Web Services Embedded information
+#eseries_subnet: # Network subnet to search for the storage system specified in CIDR form. Example: 192.168.1.0/24
+#eseries_system_serial: # Storage system serial number. Be sure to quote if the serial is all numbers and begins with zero. (This is located
+ # on a label at the top-left towards the front on the device)
+#eseries_system_addresses: # Storage system management IP addresses. Only required when eseries_system_serial or eseries_system_api_url are not
+ # defined. When not specified, addresses will be populated with eseries_management_interfaces controller addresses.
+#eseries_system_api_url:                          # Url for the storage system's embedded web services rest api. Example: https://192.168.10.100/devmgr/v2
+#eseries_system_username:                         # Username for the storage system's embedded web services rest api
+#eseries_system_password:                         # Password for the storage system's embedded web services rest api. When the admin password has not been set,
+                                                  # eseries_system_password will be used to set it.
+#eseries_proxy_ssid: # Arbitrary string for the proxy to represent the storage system. eseries_system_serial will be used when not defined.
+#eseries_template_api_url: # Template for the web services api url. Default: https://0.0.0.0:8443/devmgr/v2/
+#eseries_validate_certs:                          # Indicates whether SSL certificates should be verified. Used for both embedded and proxy. Choices: true, false
+
+# Web Services Proxy information
+# Note: eseries_proxy_* variables are required to discover storage systems prior to SANtricity OS version 11.60.2.
+#eseries_proxy_api_url:                           # Url for the storage system's proxy web services rest api. Example: https://192.168.10.100/devmgr/v2
+#eseries_proxy_api_username:                      # Username for the storage system's proxy web services rest api.
+#eseries_proxy_api_password:                      # Password for the storage system's proxy web services rest api. When the admin password has not been set,
+                                                  # eseries_proxy_api_password will be used to set it.
+
+
+# Storage Pool Default Policy Specifications
+# ------------------------------------------
+eseries_storage_pool_state: present # Default storage pool state. Choices: present, absent
+eseries_storage_pool_raid_level: raidDiskPool # Default volume raid level. Choices: raid0, raid1, raid5, raid6, raidDiskPool
+eseries_storage_pool_secure_pool: false # Default for storage pool drive security. This flag will enable the security at rest feature. There
+ # must be sufficient FDE or FIPS security capable drives. Type: boolean
+#eseries_storage_pool_criteria_drive_count: # Default storage pool drive count.
+#eseries_storage_pool_reserve_drive_count: # Default reserve drive count for drive reconstruction for storage pools using dynamic disk pool and
+ # the raid level must be set for raidDiskPool.
+#eseries_storage_pool_criteria_min_usable_capacity: # Default minimum required capacity for storage pools.
+#eseries_storage_pool_criteria_drive_type: # Default drive type for storage pools. Choices: hdd, ssd
+eseries_storage_pool_criteria_size_unit: gb # Default unit size for all storage pool related sizing.
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb
+#eseries_storage_pool_criteria_drive_min_size: # Default minimum drive size for storage pools.
+#eseries_storage_pool_criteria_drive_require_da: # Default for whether storage pools are required to have data assurance (DA) compatible drives.
+ # Type: boolean
+#eseries_storage_pool_criteria_drive_require_fde: # Default for whether storage pools are required to have drive security compatible drives.
+ # Type: boolean
+#eseries_storage_pool_usable_drives: # Ordered list of <tray_number>:<drive_slot> strings for drive candidates. This is useful to control
+ # drive selections.
+eseries_storage_pool_remove_volumes: True # Default policy for deleting volumes prior to removing storage pools.
+#eseries_storage_pool_erase_secured_drives:        # Default policy for erasing the content of drives during create and delete storage pool operations.
+ # Type: boolean
+#eseries_storage_pool_ddp_critical_threshold_pct: # Default policy for dynamic disk pool alert critical threshold.
+#eseries_storage_pool_ddp_warning_threshold_pct: # Default policy for dynamic disk pool alert warning threshold.
+
+# Volume Default Policy Specifications
+# ------------------------------------
+eseries_volume_state: present # Default volume state. Choices: present, absent
+eseries_volume_size_unit: gb # Default unit size for all volume sizing options.
+#eseries_volume_size: # Default volume size or the presented size for thinly provisioned volumes.
+#eseries_volume_data_assurance_enabled: # Default for whether data assurance(DA) is required to be enabled.
+#eseries_volume_segment_size_kb: # Default segment size measured in kib.
+#eseries_volume_read_cache_enable: # Default for read caching which will cache all read requests.
+#eseries_volume_read_ahead_enable: # Default for read ahead caching; this is good for sequential workloads to cache subsequent blocks.
+#eseries_volume_write_cache_enable: # Default for write caching which will cache all writes.
+#eseries_volume_cache_without_batteries: # Default for allowing caching when batteries are not present.
+#eseries_volume_thin_provision: # Default for whether volumes should be thinly provisioned.
+#eseries_volume_thin_volume_repo_size: # Default for actually allocated space for thinly provisioned volumes.
+#eseries_volume_thin_volume_max_repo_size: # Default for the maximum allocated space allowed for thinly provisioned volumes.
+#eseries_volume_thin_volume_expansion_policy: # Default thin volume expansion policy. Choices: automatic, manual
+#eseries_volume_thin_volume_growth_alert_threshold: # Default thin volume growth alert threshold; this is the threshold for when the thin volume expansion
+ # policy will be enacted. Allowable values are between and including 10% and 99%
+#eseries_volume_ssd_cache_enabled: # Default for ssd cache which will enable the volume to use an existing SSD cache on the storage array.
+#eseries_volume_host: # Default host for all volumes; the value can be any host from the Ansible inventory. Any initiator may be
+ # used whether connected or not since the storage array does not require connectivity in order to create
+ # host objects.
+#eseries_volume_workload_name: # Default workload tag name
+#eseries_volume_metadata: # Default metadata
+#eseries_volume_owning_controller:                 # Default preferred owning controller
+eseries_volume_wait_for_initialization: false      # Default for whether volume creation will wait for initialization to complete
+
+
+# Storage Pool-Volume Mapping Default Policy Specifications
+# ---------------------------------------------------------
+eseries_lun_mapping_state: present # Generally specifies whether a LUN mapping should be present. This is useful when adding a default host for all
+ # volumes. Choices: present, absent
+#eseries_lun_mapping_host:                         # Default host for all volumes not specifically given a host either in common_volume_configuration or in
+ # eseries_storage_pool_configuration.
+
+
+# Controller iSCSI Interface Port Default Policy Specifications
+# -------------------------------------------------------
+eseries_controller_iscsi_port_state: enabled # Generally specifies whether a controller port definition should be applied Choices: enabled, disabled
+eseries_controller_iscsi_port_config_method: dhcp # General port configuration method definition for both controllers. Choices: static, dhcp
+#eseries_controller_iscsi_port_gateway: # General port IPv4 gateway for both controllers.
+#eseries_controller_iscsi_port_subnet_mask: # General port IPv4 subnet mask for both controllers.
+eseries_controller_iscsi_port_mtu: 9000 # General port maximum transfer units (MTU) for both controllers. Any value greater than 1500 (bytes).
+#eseries_controller_iscsi_port:
+# controller_a: # Controller A port definition.
+# state: # General definitions for all ports on controller A. Any option specified in the ports definition can be
+ # specified here to generalize for all controller A ports. Choices: enabled, disabled
+# config_method: # Port configuration method Choices: static, dhcp
+# address: # Port IPv4 address
+# gateway: # Port IPv4 gateway
+# subnet_mask: # Port IPv4 subnet_mask
+# mtu: # Port IPv4 mtu
+# ports: # List containing ports definitions
+# - state: # Whether the port should be enabled. Choices: enabled, disabled
+# config_method: # Port configuration method Choices: static, dhcp
+# address: # Port IPv4 address
+# gateway: # Port IPv4 gateway
+# subnet_mask: # Port IPv4 subnet_mask
+# controller_b: # Controller B port definition.
+# (...) # Same as controller A but for controller B
+
+
+
+# Controller InfiniBand iSER Interface Channel
+# --------------------------------------------
+#eseries_controller_ib_iser_port:
+# controller_a: # Ordered list of controller A channel address definition.
+# - # Port IPv4 address for channel 1
+# - (...) # So on and so forth
+# controller_b: # Ordered list of controller B channel address definition.
+
+# Controller NVMe over InfiniBand Interface Channel
+# -------------------------------------------------
+#eseries_controller_nvme_ib_port:
+# controller_a: # Ordered list of controller A channel address definition.
+# - # Port IPv4 address for channel 1
+# - (...) # So on and so forth
+# controller_b: # Ordered list of controller B channel address definition.
+
+# Controller NVMe RoCE Interface Port Default Policy Specifications
+# -------------------------------------------------------
+eseries_controller_nvme_roce_port_state: enabled # Generally specifies whether a controller port definition should be applied Choices: enabled, disabled
+eseries_controller_nvme_roce_port_config_method: dhcp # General port configuration method definition for both controllers. Choices: static, dhcp
+#eseries_controller_nvme_roce_port_gateway: # General port IPv4 gateway for both controllers.
+#eseries_controller_nvme_roce_port_subnet_mask: # General port IPv4 subnet mask for both controllers.
+eseries_controller_nvme_roce_port_mtu: 4200 # General port maximum transfer units (MTU). Any value greater than 1500 (bytes).
+eseries_controller_nvme_roce_port_speed: auto # General interface speed. Value must be a supported speed or auto for automatically negotiating the speed with the port.
+#eseries_controller_nvme_roce_port:
+# controller_a: # Controller A port definition.
+# state: # General definitions for all ports on controller A. Any option specified in the ports definition can be
+ # specified here to generalize for all controller A ports.
+# config_method: # Port configuration method Choices: static, dhcp
+# address: # Port IPv4 address
+# gateway: # Port IPv4 gateway
+# subnet_mask: # Port IPv4 subnet_mask
+# mtu: # Port IPv4 mtu
+# speed: # Port IPv4 speed
+# ports: # List containing ports definitions
+# - channel: # Channel of the port to modify. This will be a numerical value that represents the port; typically read
+ # left to right on the HIC.
+# state: # Whether the port should be enabled.
+# config_method: # Port configuration method Choices: static, dhcp
+# address: # Port IPv4 address
+# gateway: # Port IPv4 gateway
+# subnet_mask: # Port IPv4 subnet_mask
+# controller_b: # Controller B port definition.
+# state: # General definitions for all ports on controller B. Any option specified in the ports definition can be
+                                                   # specified here to generalize for all controller B ports.
+# config_method: # Port configuration method Choices: static, dhcp
+# address: # Port IPv4 address
+# gateway: # Port IPv4 gateway
+# subnet_mask: # Port IPv4 subnet_mask
+# mtu: # Port IPv4 mtu
+# speed: # Port IPv4 speed
+# ports: # List containing ports definitions
+# - channel: # Channel of the port to modify. This will be a numerical value that represents the port; typically read
+ # left to right on the HIC.
+# state: # Whether the port should be enabled.
+# config_method: # Port configuration method Choices: static, dhcp
+# address: # Port IPv4 address
+# gateway: # Port IPv4 gateway
+# subnet_mask: # Port IPv4 subnet_mask
+
+
+# Target Discovery Default Policy Specifications
+# ----------------------------------------------
+#eseries_target_name: # iSCSI target name
+eseries_target_ping: True # Enables ICMP ping response from the configured iSCSI ports (boolean)
+eseries_target_unnamed_discovery: True # Whether the iSCSI target iqn should be returned when an initiator performs a discovery session.
+eseries_target_chap_secret: # iSCSI chap secret. When left blank, the chap secret will be removed from the storage system.
+eseries_target_chap_secret_update: False           # DO NOT REMOVE! Since na_santricity_iscsi_target cannot compare the chap secret with the current one and always
+                                                    # returns changed=True, this flag is used to force the module to update the chap secret. It is preferable to
+                                                    # leave this value False and to add --extra-vars "eseries_target_chap_secret_update=True".
+
+# Host Default Policy Specifications
+# ----------------------------------
+eseries_host_force_port: True # Default for whether ports are to be allowed to be re-assigned (boolean)
+eseries_host_remove_unused_hostgroup: True # Forces any unused groups to be removed
+#eseries_host_object:
+# - name: # Host label as referenced by the storage array.
+# state:
+#    host_type:                     # Windows (non-clustering) 1, Windows (clustering) 6, Vmware 10, Linux (using kernel 3.10 and later) 28
+# group: # Host's host group
+# ports: # List of port definitions
+# - type: # Port protocol definition (iscsi, fc, sas, ib, nvme)
+# label: # Arbitrary port label
+# port: # Port initiator (iqn, wwn, etc)
+
+
+# Storage Pool-Volume Default Policy Specifications
+# -------------------------------------------------
+# Name schemes: Storage pool and volume names can be used to specify a naming scheme to produce a list of storage pools and volumes. Schemes are defined by
+# brackets and can be used to specify a range of lowercase letters, uppercase letters, range of single digit numbers, any top-level inventory
+# variables, and the current defined storage pool (volume only).
+
+#eseries_storage_pool_configuration:
+# - name: # Name or name scheme (see above) for the storage pool.
+# state: # Specifies whether the storage pool should exist (present, absent). When removing an existing storage array all of the
+ # volumes must be defined with state=absent.
+#    raid_level:                    # Volume group raid level. Choices: raid0, raid1, raid5, raid6, raidDiskPool (Default: raidDiskPool)
+# secure_pool: no # Default for storage pool drive security. This flag will enable the security at rest feature. There must be sufficient FDE
+ # or FIPS security capable drives. Type: boolean
+# criteria_drive_count: # Default storage pool drive count.
+# reserve_drive_count: # Default reserve drive count for drive reconstruction for storage pools using dynamic disk pool and the raid level must be
+ # set for raidDiskPool.
+# criteria_min_usable_capacity: # Default minimum required capacity for storage pools.
+# criteria_drive_type: # Default drive type for storage pools. Choices: hdd, ssd
+# criteria_size_unit: gb # Default unit size for all storage pool related sizing. Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb
+# criteria_drive_min_size: # Default minimum drive size for storage pools.
+# criteria_drive_require_da: # Default for whether storage pools are required to have data assurance (DA) compatible drives. Type: boolean
+# criteria_drive_require_fde: # Default for whether storage pools are required to have drive security compatible drives. Type: boolean
+# usable_drives: # Ordered list of <tray_number>:<drive_slot> strings for drive candidates. This is useful to control
+ # drive selections.
+# remove_volumes: # Default policy for deleting volumes prior to removing storage pools.
+#    erase_secured_drives:          # Default policy for erasing the content of drives during create and delete storage pool operations. Type: boolean
+# common_volume_configuration: # Any option that can be specified at the volume level can be generalized here at the storage pool level. This is useful when
+ # all volumes share common configuration definitions.
+#    volumes:                       # List of volumes associated with the storage pool.
+# - state: # Specifies whether the volume should exist (present, absent)
+# name: # (required) Name or name scheme (see above) for the volume(s) to be created in the storage pool(s)
+#        host:                      # Host or host group to which the volume should be mapped.
+# host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or the storage system default host type is incorrect. Common definitions below:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
+# size: # Size of the volume or presented size of the thinly provisioned volume.
+# size_unit: # Unit size for the size, thin_volume_repo_size, and thin_volume_max_repo_size
+ # Choices: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb
+# segment_size_kb: # Indicates the amount of data stored on a drive before moving on to the next drive in the volume group. Does not apply to pool volumes.
+# thin_provision: # Whether volumes should be thinly provisioned.
+# thin_volume_repo_size: # Actually allocated space for thinly provisioned volumes.
+# thin_volume_max_repo_size: # Maximum allocated space allowed for thinly provisioned volumes.
+# thin_volume_expansion_policy: # Thin volume expansion policy. Choices: automatic, manual
+# thin_volume_growth_alert_threshold: # Thin volume growth alert threshold; this is the threshold for when the thin volume expansion
+ # policy will be enacted. Allowable values are between and including 10% and 99%
+# ssd_cache_enabled: # Enables ssd cache which will enable the volume to use an existing SSD cache on the storage array.
+# data_assurance_enabled: # Enables whether data assurance(DA) is required to be enabled.
+# read_cache_enable: # Enables read caching which will cache all read requests.
+# read_ahead_enable: # Enables read ahead caching; this is good for sequential workloads to cache subsequent blocks.
+# write_cache_enable: # Enables write caching which will cache all writes.
+# workload_name: # Name of the volume's workload. This can be defined using the metadata option or, if already defined, specify one already
+ # created on the storage array.
+# metadata: # Dictionary containing arbitrary entries normally used for defining the volume(s) workload.
+#        wait_for_initialization:   # Whether volume creation will wait for initialization to complete
+
+# Snapshot Consistency Group Default Policy Specifications
+# --------------------------------------------------------
+eseries_snapshot_remove_unspecified: false # Whether to remove any snapshot group or view that is not specified (Default: false).
+#eseries_snapshot_groups_maximum_snapshots: # Default maximum point-in-time snapshot images (Default: 32).
+#eseries_snapshot_groups_reserve_capacity_pct: # Default reserve capacity percentage (Default: 40)
+#eseries_snapshot_groups_preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+#eseries_snapshot_groups_reserve_capacity_full_policy: # Default full reserve capacity policy (Default: purge). Choices: [purge, reject]
+#eseries_snapshot_groups_alert_threshold_pct: # Default reserve capacity percentage full to alert administrators (Default 75).
+#eseries_snapshot_groups:
+# - name: # Name of snapshot consistency group.
+# maximum_snapshots: # Maximum allowed snapshot point-in-time images for consistency group (Default: 32).
+# reserve_capacity_pct: # Reserve capacity measured as a percentage of the base volume (Default: 40). Reserve capacity can be expanded
+#                                   # and trimmed; however, the trim operation requires there be no base volume snapshot images in the group.
+# reserve_capacity_full_policy: # Policy to implement when reserve capacity is full (Default: purge). Choices [purge, reject]
+# alert_threshold_pct: # Reserve capacity full alert threshold for storage system administrators (Default: 75).
+# rollback_priority: # Storage system priority for base volume rollback (Default: medium). Choices [lowest, low, medium, high, highest]
+# volumes: # Information for each volume in the consistency group.
+# - volume: # Base volume name
+# reserve_capacity_pct: # Reserve capacity measured as a percentage of the base volume (Default: 40). Reserve capacity can be expanded
+                                    # and trimmed; however, the trim operation requires there be no base volume snapshot images in the group.
+# preferred_reserve_storage_pool: # Preferred reserve capacity storage pool or volume group. This will default to the base volume's
+ # storage pool or volume group. The reserve capacity volume cannot be changed once created.
+# - (...)
+
+#eseries_snapshot_views_host: # Default host or host group to map all snapshot volumes.
+#eseries_snapshot_views_reserve_capacity_pct: # Default reserve capacity percentage (Default: 40)
+#eseries_snapshot_views_preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+#eseries_snapshot_views_alert_threshold_pct: # Default reserve capacity percentage full to alert administrators (Default 75).
+#eseries_snapshot_views_writable: # Default for whether to make snapshot volumes writable.
+#eseries_snapshot_views_validate: # Default for whether to validate snapshot volumes after creation.
+#eseries_snapshot_views:
+# - volume: # Consistency group's snapshot view's name.
+# group_name: # Snapshot consistency group's name.
+#    pit_name:                      # Point-in-time snapshot images group name. (Only available when specified using the Ansible na_santricity modules, directly or via the role.)
+#    pit_timestamp:                 # Point-in-time snapshot images group timestamp, in the form YYYY-MM-DD HH:MM:SS (AM|PM); hours, minutes, seconds, and day-period are optional.
+# host: # Host or host group to map snapshot volumes.
+# writable: # Whether snapshot volume of base volume images should be writable.
+# validate: # Whether snapshot volume should be validated which includes both a media scan and parity validation.
+# reserve_capacity_pct: # Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). Only used when snapshot volume is writable.
+# preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+# alert_threshold: # Reserve capacity percentage full to alert administrators
+# volumes: # (Optional) Select subset of volumes within the snapshot consistency group.
+# - name: # Name of volume within consistency group.
+# host: # Host or host group to map snapshot volumes.
+# writable: # Whether snapshot volume of base volume images should be writable.
+# validate: # Whether snapshot volume should be validated which includes both a media scan and parity validation.
+# reserve_capacity_pct: # Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). Only used when snapshot volume is writable.
+# preferred_reserve_storage_pool: # Preferred storage pool or volume group for the reserve capacity volume.
+
+#eseries_snapshot_rollback_priority: medium # Default point-in-time rollback priority (Default: medium). Choices [lowest, low, medium, high, highest]
+#eseries_snapshot_rollback_backup: true # Default whether snapshot should be taken prior to rolling back base volumes (Default: true).
+#eseries_snapshot_rollbacks:
+# - group_name: # Snapshot consistency group's name.
+#    pit_name:                      # Point-in-time snapshot images group name. (Only available when specified using the Ansible na_santricity modules, directly or via the role.)
+#    pit_timestamp:                 # Point-in-time snapshot images group timestamp, in the form YYYY-MM-DD HH:MM:SS (AM|PM); hours, minutes, seconds, and day-period are optional.
+# rollback_priority: # Storage system priority for base volume rollback (Default: medium). Choices [lowest, low, medium, high, highest]
+# rollback_backup: # Whether to create point-in-time snapshot images of the consistency group prior to rollback.
+# volumes: # (Optional) Select subset of volume names within the snapshot consistency group to rollback.
+
+# Initiator-Target Protocol Variable Defaults
+# -------------------------------------------
+# Note that the following commands produce a unique, line-separated list of the IQNs or WWNs of the interfaces used.
+eseries_initiator_protocol: # This variable defines which protocol the storage array will use. Choices: iscsi, fc, sas, ib_iser, ib_srp, nvme_ib, nvme_fc, nvme_roce
+eseries_initiator_command:
+ fc:
+ linux: "cat /sys/class/fc_host/host*/port_name | sort | uniq"
+ windows: "(Get-InitiatorPort | Where-Object -P ConnectionType -EQ 'Fibre Channel' | Select-Object -Property PortAddress |
+ Format-Table -AutoSize -HideTableHeaders | Out-String).trim()"
+ iscsi:
+ linux: "grep -o iqn.* /etc/iscsi/initiatorname.iscsi"
+ windows: "(get-initiatorPort | select-object -property nodeaddress | sort-object | get-unique | ft -autoSize | out-string -stream |
+ select-string iqn | out-string).trim()"
+ sas:
+ # NetApp IMT for SAS attached E-Series SAN hosts recommends adding all possible SAS addresses with the base address
+ # starting at 0, and the last address ending in 3 for single port HBAs, or 7 for dual port HBAs. Since determining
+    # single vs. dual port HBAs adds complexity, we always add all 8 possible permutations of the SAS address.
+ linux: "cat /sys/class/sas_host/host*/device/scsi_host/*/host_sas_address | sort | uniq"
+ windows: "(Get-InitiatorPort | Where-Object -P ConnectionType -EQ 'SAS' | Select-Object -Property PortAddress | Format-Table -AutoSize -HideTableHeaders | Out-String).trim()"
+ ib_iser:
+ linux: "grep -o iqn.* /etc/iscsi/initiatorname.iscsi"
+ windows: "" # add windows command for determining host iqn address(es)
+ ib_srp:
+ linux: "for fp in /sys/class/infiniband/*/ports/*/gids/*; do out=`cat $fp | tr -d :`; port=`expr substr $out 17 32`; if [ $port != 0000000000000000 ]; then echo 0x$port; fi; done | sort | uniq"
+ windows: "" # add windows command for determining host guid
+ nvme_ib:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+ nvme_fc:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+ nvme_roce:
+ linux: "grep -o nqn.* /etc/nvme/hostnqn"
+ windows: "" # add windows command for determining host nqn address(es)
+
+eseries_iscsi_iqn_prefix: "iqn.2005-03.org.open-iscsi:" # Last 12 characters from host machine-id (uuid) will be appended to the prefix if the host does not have a required iqn.
+eseries_nvme_nqn_prefix: "nqn.2014-08.org.nvmexpress:uuid:" # Host machine-id (uuid) will be appended to the prefix if the host does not have a required nqn.
+eseries_protocols_using_eseries_iscsi_iqn: ["iscsi", "ib_iser"]
+eseries_protocols_using_eseries_nvme_nqn: ["nvme_ib", "nvme_fc", "nvme_roce"]
+eseries_iscsi_iqn_path: "/etc/iscsi/" #initiatorname.iscsi"
+eseries_nvme_nqn_path: "/etc/nvme/" #hostnqn"
+
+eseries_remove_all_configuration: False # WARNING!!! This flag will force any storage pool, volume, host, hostgroup and mapping to be absent. Choices: True, False \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/meta/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/meta/main.yml
new file mode 100644
index 000000000..05169af14
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/meta/main.yml
@@ -0,0 +1,19 @@
+galaxy_info:
+ author: Nathan Swartz (@ndswartz)
+ description: Manages NetApp E-Series storage system's interfaces, storage pools, volumes, hosts, hostgroups, and volume mappings.
+ company: NetApp, Inc
+  license: BSD-3-Clause
+ platforms:
+ min_ansible_version: 2.13
+ galaxy_tags:
+ - netapp
+ - eseries
+ - storage
+ - iscsi
+ - sas
+ - fc
+ - infiniband
+ - ib
+ - nvme
+
+dependencies: []
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/initiator.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/initiator.yml
new file mode 100644
index 000000000..4d40051d5
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/initiator.yml
@@ -0,0 +1,114 @@
+---
+# nar_santricity_host/tasks/initiator.yml
+
+- name: Collect facts on the storage array
+ na_santricity_facts:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: storage_array_facts
+ when: eseries_host_object is defined
+
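+# The following set_fact builds two facts: current_storage_array_hostgroups (a mapping of each existing
+# hostgroup name to the list of host names it contains) and hostgroups (the group names referenced by
+# eseries_host_object entries that are present). The Jinja expressions call dict.update() and list.append()
+# inside {% if %} tags purely for their side effects; both return a falsy value, so the if-body renders nothing.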
+- name: Organize current and expected hostgroups
+ set_fact:
+ current_storage_array_hostgroups: |-
+ {%- set current_hostgroup={} %}
+ {%- for group in storage_array_facts["storage_array_facts"]["netapp_host_groups"] %}
+ {%- if current_hostgroup.update( {group["name"]: []} ) %}{%- endif %}
+ {%- for host in storage_array_facts["storage_array_facts"]["netapp_hosts"] %}
+ {%- if group["id"] == host["group_id"] and current_hostgroup[group["name"]].append(host["name"]) %}{%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ current_hostgroup }}
+ hostgroups: |-
+ {%- set hostgroups=[] %}
+ {%- for host in eseries_host_object %}
+ {%- if "group" in (host.keys()|list) and host["group"] not in hostgroups and
+ ("state" not in (host.keys()|list) or host["state"] == "present") and hostgroups.append(host["group"]) %}
+ {%- endif %}
+ {%- endfor %}
+ {{ hostgroups }}
+ when: eseries_host_object is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} inventory-defined hostgroup definitions"
+ na_santricity_hostgroup:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_remove_all_configuration_state | default('present') }}"
+ name: "{{ item }}"
+ hosts: "{{ current_storage_array_hostgroups[item] | default(omit) }}"
+ connection: local
+ loop: "{{ lookup('list', hostgroups) }}"
+ when: eseries_host_object is defined and (hostgroups | length > 0)
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} inventory-defined host definitions"
+ na_santricity_host:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_remove_all_configuration_state | default(item['state'] | default(omit)) }}"
+ name: "{{ item['name'] }}"
+ ports: "{{ item['ports'] | default(omit) }}"
+ force_port: "{{ item['force_port'] | default(eseries_host_force_port | default(omit)) }}"
+ group: "{{ item['group'] | default(eseries_host_group | default(omit)) }}"
+ host_type: "{{ item['host_type_index'] | default(item['host_type'] | default(eseries_host_type_index | default(omit))) }}"
+ connection: local
+ loop: "{{ lookup('list', eseries_host_object) }}"
+ when: eseries_host_object is defined
+
+- name: Collect facts on the storage array
+ na_santricity_facts:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: storage_array_facts
+ when: eseries_host_object is defined
+
+- name: Update the current storage array hostgroups
+ set_fact:
+ current_storage_array_hostgroups: |-
+ {%- set current_hostgroup={} %}
+ {%- for group in storage_array_facts["storage_array_facts"]["netapp_host_groups"] %}
+ {%- if current_hostgroup.update( {group["name"]: []} ) %}{%- endif %}
+ {%- for host in storage_array_facts["storage_array_facts"]["netapp_hosts"] %}
+ {%- if group["id"] == host["group_id"] and current_hostgroup[group["name"]].append(host["name"]) %}{%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ current_hostgroup }}
+ when: eseries_host_object is defined
+
+- name: Create list of unused hostgroups
+ set_fact:
+ unused_hostgroups: |-
+ {%- set unused_hostgroup = [] %}
+ {%- for current_host_name in (current_storage_array_hostgroups.keys()|list) %}
+ {%- if (current_storage_array_hostgroups[current_host_name]|length==0) and unused_hostgroup.append(current_host_name) %}{%- endif %}
+ {%- endfor %}
+ {{ unused_hostgroup }}
+ when: eseries_host_object is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} inventory-defined hostgroup definitions"
+ na_santricity_hostgroup:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: absent
+ name: "{{ item }}"
+ hosts: "{{ current_storage_array_hostgroups[item] | default(omit) }}"
+ connection: local
+ loop: "{{ lookup('list', unused_hostgroups) }}"
+ when: "eseries_host_object is defined and (unused_hostgroups|length>0) and eseries_host_remove_unused_hostgroup is
+ defined and eseries_host_remove_unused_hostgroup"
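A minimal sketch of the eseries_host_object structure these tasks consume (names are illustrative; the ports entry assumes the type/label/port format accepted by na_santricity_host). group, state, ports, and host_type are optional per entry, and eseries_host_remove_unused_hostgroup controls the cleanup of hostgroups left empty above:

    eseries_host_remove_unused_hostgroup: true
    eseries_host_object:
      - name: server-01
        state: present
        group: database_hosts
        host_type: linux dm-mp
        ports:
          - type: iscsi
            label: port1
            port: iqn.1994-05.com.redhat:b6f6e3731bd0
      - name: server-02
        group: database_hosts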
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface.yml
new file mode 100644
index 000000000..673084908
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface.yml
@@ -0,0 +1,27 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controllers iSCSI interface ports"
+ import_tasks: interface/iscsi.yml
+ delegate_to: localhost
+ when: eseries_initiator_protocol == "iscsi"
+ tags:
+ - iscsi
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controllers InfiniBand iSER interface ports"
+ import_tasks: interface/ib_iser.yml
+ delegate_to: localhost
+ when: eseries_initiator_protocol == "ib_iser"
+ tags:
+ - ib_iser
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controllers NVMe interface ports over InfiniBand"
+ import_tasks: interface/nvme_ib.yml
+ delegate_to: localhost
+ when: eseries_initiator_protocol == "nvme_ib"
+ tags:
+ - nvme_ib
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controllers NVMe interface ports on RoCE"
+ import_tasks: interface/nvme_roce.yml
+ delegate_to: localhost
+ when: eseries_initiator_protocol == "nvme_roce"
+ tags:
+ - nvme_roce
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/ib_iser.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/ib_iser.yml
new file mode 100644
index 000000000..01d253647
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/ib_iser.yml
@@ -0,0 +1,29 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller A inventory-defined controller port definitions for InfiniBand iSER"
+ na_santricity_ib_iser_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: A
+ channel: "{{ channel + 1 }}"
+ address: "{{ item }}"
+ loop: "{{ lookup('list', eseries_controller_ib_iser_port['controller_a']) }}"
+ loop_control:
+ index_var: channel
+ when: eseries_controller_ib_iser_port is defined and eseries_controller_ib_iser_port['controller_a'] is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller B inventory-defined controller port definitions for InfiniBand iSER"
+ na_santricity_ib_iser_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: B
+ channel: "{{ channel + 1 }}"
+ address: "{{ item }}"
+ loop: "{{ lookup('list', eseries_controller_ib_iser_port['controller_b']) }}"
+ loop_control:
+ index_var: channel
+ when: eseries_controller_ib_iser_port is defined and eseries_controller_ib_iser_port['controller_b'] is defined
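A minimal sketch of the eseries_controller_ib_iser_port variable used above (addresses are illustrative); each list entry is assigned, in order, to the next channel on that controller:

    eseries_controller_ib_iser_port:
      controller_a:
        - 192.168.100.101
        - 192.168.100.102
      controller_b:
        - 192.168.100.103
        - 192.168.100.104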
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/iscsi.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/iscsi.yml
new file mode 100644
index 000000000..7bc047ec5
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/iscsi.yml
@@ -0,0 +1,56 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller A inventory-defined controller port definitions for iSCSI"
+ na_santricity_iscsi_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] | default(eseries_controller_iscsi_port_state | default(omit)) }}"
+ port: "{{ port + 1 }}"
+ controller: A
+ config_method: "{{ item['config_method'] | default(eseries_controller_iscsi_port_config_method | default(omit)) }}"
+ address: "{{ item['address'] | default(omit) }}"
+ gateway: "{{ item['gateway'] | default(eseries_controller_iscsi_port_gateway | default(omit)) }}"
+ subnet_mask: "{{ item['subnet_mask'] | default(eseries_controller_iscsi_port_subnet_mask | default(omit)) }}"
+ mtu: "{{ item['mtu'] | default(eseries_controller_iscsi_port_mtu | default(omit)) }}"
+ speed: "{{ item['speed'] | default(eseries_controller_iscsi_port_speed | default(omit)) }}"
+ loop: "{{ lookup('list', eseries_controller_iscsi_port['controller_a']) }}"
+ loop_control:
+ index_var: port
+ when: eseries_controller_iscsi_port is defined and eseries_controller_iscsi_port['controller_a'] is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller B inventory-defined controller port definitions for iSCSI"
+ na_santricity_iscsi_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] | default(eseries_controller_iscsi_port_state | default(omit)) }}"
+ port: "{{ port + 1 }}"
+ controller: B
+ config_method: "{{ item['config_method'] | default(eseries_controller_iscsi_port_config_method | default(omit)) }}"
+ address: "{{ item['address'] | default(omit) }}"
+ gateway: "{{ item['gateway'] | default(eseries_controller_iscsi_port_gateway | default(omit)) }}"
+ subnet_mask: "{{ item['subnet_mask'] | default(eseries_controller_iscsi_port_subnet_mask | default(omit)) }}"
+ mtu: "{{ item['mtu'] | default(eseries_controller_iscsi_port_mtu | default(omit)) }}"
+ speed: "{{ item['speed'] | default(eseries_controller_iscsi_port_speed | default(omit)) }}"
+ loop: "{{ lookup('list', eseries_controller_iscsi_port['controller_b']) }}"
+ loop_control:
+ index_var: port
+ when: eseries_controller_iscsi_port is defined and eseries_controller_iscsi_port['controller_b'] is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} iSCSI discovery parameters"
+ na_santricity_iscsi_target:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ name: "{{ eseries_iscsi_target_name | default(omit) }}"
+ chap_secret: "{%- if eseries_iscsi_target_chap_secret_update %}{{ eseries_iscsi_target_chap_secret }}{%- endif %}"
+ ping: "{{ eseries_iscsi_target_ping | default(omit) }}"
+ unnamed_discovery: "{{ eseries_iscsi_target_unnamed_discovery | default(omit) }}"
+ when: ((eseries_iscsi_target_chap_secret is defined and eseries_iscsi_target_chap_secret_update) or
+ eseries_iscsi_target_name is defined or eseries_iscsi_target_ping is defined or
+ eseries_iscsi_target_unnamed_discovery is defined)
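A minimal sketch of the iSCSI interface and target variables referenced above (addresses and names are illustrative); role-level eseries_controller_iscsi_port_* values act as defaults that individual port entries may override:

    eseries_controller_iscsi_port_config_method: static
    eseries_controller_iscsi_port_subnet_mask: 255.255.255.0
    eseries_controller_iscsi_port_mtu: 9000
    eseries_controller_iscsi_port:
      controller_a:
        - address: 192.168.1.101
        - address: 192.168.1.103
      controller_b:
        - address: 192.168.1.102
        - address: 192.168.1.104
    eseries_iscsi_target_name: eseries_array_1
    eseries_iscsi_target_ping: true
    eseries_iscsi_target_unnamed_discovery: true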
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_ib.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_ib.yml
new file mode 100644
index 000000000..d39e3b504
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_ib.yml
@@ -0,0 +1,29 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller A inventory-defined controller port definitions for NVMe interface ports over InfiniBand"
+ na_santricity_nvme_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: A
+ channel: "{{ channel + 1 }}"
+ address: "{{ item }}"
+ loop: "{{ lookup('list', eseries_controller_nvme_ib_port['controller_a']) }}"
+ loop_control:
+ index_var: channel
+ when: eseries_controller_nvme_ib_port is defined and eseries_controller_nvme_ib_port['controller_a'] is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller B inventory-defined controller port definitions for NVMe interface ports over InfiniBand"
+ na_santricity_nvme_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: B
+ channel: "{{ channel + 1 }}"
+ address: "{{ item }}"
+ loop: "{{ lookup('list', eseries_controller_nvme_ib_port['controller_b']) }}"
+ loop_control:
+ index_var: channel
+ when: eseries_controller_nvme_ib_port is defined and eseries_controller_nvme_ib_port['controller_b'] is defined
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_roce.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_roce.yml
new file mode 100644
index 000000000..fae92f8a6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/interface/nvme_roce.yml
@@ -0,0 +1,41 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller A inventory-defined controller port definitions for NVMe interface ports on RoCE"
+ na_santricity_nvme_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] | default(eseries_controller_nvme_roce_port_state | default(omit)) }}"
+ controller: A
+ channel: "{{ channel + 1 }}"
+ config_method: "{{ item['config_method'] | default(eseries_controller_nvme_roce_port_config_method | default(omit)) }}"
+ address: "{{ item['address'] | default(omit) }}"
+ gateway: "{{ item['gateway'] | default(eseries_controller_nvme_roce_port_gateway | default(omit)) }}"
+ subnet_mask: "{{ item['subnet_mask'] | default(eseries_controller_nvme_roce_port_subnet_mask | default(omit)) }}"
+ mtu: "{{ item['mtu'] | default(eseries_controller_nvme_roce_port_mtu | default(omit)) }}"
+ speed: "{{ item['speed'] | default(eseries_controller_nvme_roce_port_speed | default(omit)) }}"
+ loop: "{{ lookup('list', eseries_controller_nvme_roce_port['controller_a']) }}"
+ loop_control:
+ index_var: channel
+ when: eseries_controller_nvme_roce_port is defined and eseries_controller_nvme_roce_port['controller_a'] is defined
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} controller B inventory-defined controller port definitions for NVMe interface ports on RoCE"
+ na_santricity_nvme_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] | default(eseries_controller_nvme_roce_port_state | default(omit)) }}"
+ controller: B
+ channel: "{{ channel + 1 }}"
+ config_method: "{{ item['config_method'] | default(eseries_controller_nvme_roce_port_config_method | default(omit)) }}"
+ address: "{{ item['address'] | default(omit) }}"
+ gateway: "{{ item['gateway'] | default(eseries_controller_nvme_roce_port_gateway | default(omit)) }}"
+ subnet_mask: "{{ item['subnet_mask'] | default(eseries_controller_nvme_roce_port_subnet_mask | default(omit)) }}"
+ mtu: "{{ item['mtu'] | default(eseries_controller_nvme_roce_port_mtu | default(omit)) }}"
+ speed: "{{ item['speed'] | default(eseries_controller_nvme_roce_port_speed | default(omit)) }}"
+ loop: "{{ lookup('list', eseries_controller_nvme_roce_port['controller_b']) }}"
+ loop_control:
+ index_var: channel
+ when: eseries_controller_nvme_roce_port is defined and eseries_controller_nvme_roce_port['controller_b'] is defined
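A minimal sketch of eseries_controller_nvme_roce_port (addresses are illustrative); as with the iSCSI ports, each entry may carry its own state, config_method, address, gateway, subnet_mask, mtu, and speed, or fall back to the eseries_controller_nvme_roce_port_* defaults:

    eseries_controller_nvme_roce_port_config_method: static
    eseries_controller_nvme_roce_port_subnet_mask: 255.255.255.0
    eseries_controller_nvme_roce_port:
      controller_a:
        - address: 192.168.2.101
        - address: 192.168.2.103
      controller_b:
        - address: 192.168.2.102
        - address: 192.168.2.104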
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/lun_mapping.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/lun_mapping.yml
new file mode 100644
index 000000000..395a5d399
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/lun_mapping.yml
@@ -0,0 +1,178 @@
+- name: Collect facts on the storage array
+ na_santricity_facts:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: storage_array_facts
+
+- name: Collect volume host and host group list
+ set_fact:
+ hosts: "{{ lookup('netapp_eseries.santricity.santricity_host', hostvars[inventory_hostname],
+ volumes=lookup('netapp_eseries.santricity.santricity_volume', hostvars[inventory_hostname])) }}"
+
+- name: Retrieve required details and map luns
+ block:
+ - name: Collect host/host group networking information
+ setup:
+ gather_subset: min
+ register: hosts_info
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ loop: "{{ lookup('list', (hosts['expected_hosts'].keys() | list)) }}"
+
+ - name: Collect Ansible defined host and host group interface information
+ set_fact:
+ host_interface_ansible: |-
+ {%- set host_interfaces = [] -%}
+ {%- for host in (hosts['expected_hosts'].keys() | list) -%}
+ {%- if "eseries_iscsi_iqn" in (hostvars[host].keys() | list) -%}
+ {%- if host_interfaces.append({"item": host, "stdout_lines": [hostvars[host]["eseries_iscsi_iqn"]]}) -%}{%- endif -%}
+ {%- elif "eseries_nvme_nqn" in (hostvars[host].keys() | list) -%}
+ {%- if host_interfaces.append({"item": host, "stdout_lines": [hostvars[host]["eseries_nvme_nqn"]]}) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {{ host_interfaces }}
+
+ - name: Collect host and host group interface information for Linux
+ shell: "{{ eseries_initiator_command[eseries_initiator_protocol]['linux'] }}"
+ register: host_interface_linux
+ delegate_to: "{{ item }}"
+ become: True
+ changed_when: False
+ failed_when: "host_interface_linux.rc != 0 and not (eseries_initiator_protocol in eseries_protocols_using_eseries_iscsi_iqn or
+ eseries_initiator_protocol in eseries_protocols_using_eseries_nvme_nqn)"
+ when: '((hostvars[item]["ansible_facts"]["system"] | lower) == "linux" and
+ ("eseries_iscsi_iqn" not in (hostvars[item].keys() | list) or eseries_initiator_protocol not in eseries_protocols_using_eseries_iscsi_iqn) and
+ ("eseries_nvme_nqn" not in (hostvars[item].keys() | list) or eseries_initiator_protocol not in eseries_protocols_using_eseries_nvme_nqn))'
+ loop: "{{ lookup('list', (hosts['expected_hosts'].keys() | list)) }}"
+
+ - name: Collect host and host group interface information for Windows
+ win_shell: "{{ eseries_initiator_command[eseries_initiator_protocol]['windows'] }}"
+ register: host_interface_windows
+ delegate_to: "{{ item }}"
+ changed_when: False
+ failed_when: "host_interface_windows.rc != 0"
+ when: '((hostvars[item]["ansible_facts"]["os_family"] | lower == "windows") and
+ ("eseries_iscsi_iqn" not in (hostvars[item].keys() | list) or eseries_initiator_protocol not in eseries_protocols_using_eseries_iscsi_iqn) and
+ ("eseries_nvme_nqn" not in (hostvars[item].keys() | list) or eseries_initiator_protocol not in eseries_protocols_using_eseries_nvme_nqn))'
+ loop: "{{ lookup('list', (hosts['expected_hosts'].keys() | list)) }}"
+
+ - name: Aggregate host/host group interface information
+ set_fact:
+ host_interface: |-
+ {%- set host_interfaces = [] -%}
+ {%- for host in (hosts['expected_hosts'].keys() | list) -%}
+ {%- set found_interfaces = host_interface_ansible + host_interface_linux['results'] + host_interface_windows['results'] -%}
+ {%- for interface in found_interfaces if interface["item"] == host and "skip_reason" not in (interface.keys() | list) and interface["stdout_lines"] -%}
+ {%- if host_interfaces.append(interface) -%}{%- endif -%}
+ {%- else -%}
+ {%- if eseries_initiator_protocol in eseries_protocols_using_eseries_iscsi_iqn -%}
+ {%- if host_interfaces.append({"item": host, "stdout_lines": [eseries_iscsi_iqn_prefix ~ hostvars[host]["ansible_machine_id"][-12:]], "generate": True}) -%}{%- endif -%}
+ {%- elif eseries_initiator_protocol in eseries_protocols_using_eseries_nvme_nqn -%}
+ {%- if host_interfaces.append({"item": host, "stdout_lines": [eseries_nvme_nqn_prefix ~ hostvars[host]["ansible_machine_id"]], "generate": True}) -%}{%- endif -%} {%- endif -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- host_interfaces -}}
+
+ - name: Generate missing host IQN (Linux only).
+ block:
+ - name: Ensure path for initiatorname.iscsi exists.
+ file:
+ state: directory
+ path: "{{ eseries_iscsi_iqn_path }}"
+ delegate_to: "{{ item['item'] }}"
+ become: true
+ loop: "{{ host_interface }}"
+ when: "(hostvars[item['item']]['ansible_facts']['system'] | lower) == 'linux' and (item['generate'] | default(False)) == True"
+ - name: Generate initiatorname.iscsi file.
+ ansible.builtin.template:
+ src: initiatorname_iscsi.j2
+ dest: "{{ eseries_iscsi_iqn_path }}initiatorname.iscsi"
+ delegate_to: "{{ item['item'] }}"
+ become: true
+ loop: "{{ host_interface }}"
+ when: "(hostvars[item['item']]['ansible_facts']['system'] | lower) == 'linux' and (item['generate'] | default(False)) == True"
+ when: eseries_initiator_protocol in eseries_protocols_using_eseries_iscsi_iqn
+
+ - name: Generate missing host NQN (Linux only).
+ block:
+ - name: Ensure NVMe NQN directory exists on host.
+ file:
+ state: directory
+ path: "{{ eseries_nvme_nqn_path }}"
+ delegate_to: "{{ item['item'] }}"
+ become: true
+ loop: "{{ host_interface }}"
+ when: "(hostvars[item['item']]['ansible_facts']['system'] | lower) == 'linux' and (item['generate'] | default(False)) == True"
+ - name: Generate NVMe NQN for host if one was not discovered.
+ ansible.builtin.template:
+ src: hostnqn.j2
+ dest: "{{ eseries_nvme_nqn_path }}hostnqn"
+ delegate_to: "{{ item['item'] }}"
+ become: true
+ loop: "{{ host_interface }}"
+ when: "(hostvars[item['item']]['ansible_facts']['system'] | lower) == 'linux' and (item['generate'] | default(False)) == True"
+ when: eseries_initiator_protocol in eseries_protocols_using_eseries_nvme_nqn
+
+ - name: Organize host/host group interface information and update hosts variable
+ set_fact:
+ hosts: "{{ lookup('netapp_eseries.santricity.santricity_host_detail',
+ hosts, hosts_info=hosts_info['results'], host_interface_ports=host_interface, protocol=eseries_initiator_protocol) }}"
+
+ - name: "{{'Create' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Delete' }} all hosts objects on the storage array"
+ na_santricity_host:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_remove_all_configuration_state | default(hosts['expected_hosts'][item]['state'] | default(omit)) }}"
+ name: "{{ hosts['expected_hosts'][item]['sanitized_hostname'] }}"
+ ports: "{{ hosts['expected_hosts'][item]['ports'] }}"
+ force_port: "{{ eseries_host_force_port | default(omit) }}"
+ host_type: "{{ hosts['expected_hosts'][item]['host_type'] | default(omit) }}"
+ connection: local
+ loop: "{{ lookup('list', (hosts['expected_hosts'].keys()|list)) }}"
+
+ - name: "{{'Create' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Delete' }} all required host groups on the storage array"
+ na_santricity_hostgroup:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_remove_all_configuration_state | default('present') }}"
+ name: "{{ item }}"
+ hosts: "{{ hosts['host_groups'][item] | default(omit) }}"
+ connection: local
+ loop: "{{ lookup('list', (hosts['host_groups'].keys()|list)) }}"
+
+ - name: Update facts on the storage array
+ na_santricity_facts:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: storage_array_facts
+ when: eseries_remove_all_configuration_state is not defined or eseries_remove_all_configuration_state == False
+
+ - name: "{{'Map' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unmap' }} volume to host or host group."
+ na_santricity_lun_mapping:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ 'present' if item['target'] is defined else 'absent' }}"
+ volume: "{{ item['volume'] }}"
+ target: "{{ item['target'] | default(eseries_lun_mapping_host) | default(omit) }}"
+ lun: "{{ item['lun'] | default(omit) }}"
+ connection: local
+ loop: "{{ lookup('netapp_eseries.santricity.santricity_lun_mapping', storage_array_facts,
+ volumes=lookup('netapp_eseries.santricity.santricity_volume', hostvars[inventory_hostname]), wantlist=True) }}"
+ when: hosts != []
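The mapping loop above takes its targets from the santricity_lun_mapping lookup: a volume is mapped when a target is known for it, with eseries_lun_mapping_host as the fallback target. A minimal sketch (pool, volume, and host names are illustrative; the host key on a volume is assumed here to name the host or hostgroup object the volume should be mapped to):

    eseries_lun_mapping_host: database_hosts   # fallback target when a volume does not name its own host
    eseries_storage_pool_configuration:
      - name: pool_1
        volumes:
          - name: app_data
            host: server-01                    # mapped to this host object
            size: 2
            size_unit: tb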
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/main.yml
new file mode 100644
index 000000000..d19ff8427
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/main.yml
@@ -0,0 +1,52 @@
+- name: Set current storage system credentials
+ include_role:
+ name: netapp_eseries.santricity.nar_santricity_common
+ tasks_from: build_info.yml
+ when: current_eseries_api_url is not defined
+ tags:
+ - always
+
+- name: Override all inventory configuration states.
+ set_fact:
+ eseries_remove_all_configuration_state: absent
+ when: eseries_remove_all_configuration is defined and eseries_remove_all_configuration == True
+ tags:
+ - always
+
+- name: Unconfigure snapshot consistency groups
+ import_tasks: snapshot.yml
+ when: eseries_remove_all_configuration == True | default(False)
+
+- name: Configure NetApp E-Series storage system disk pool configuration
+ import_tasks: storage_pool_present.yml
+ tags:
+ - storage_pools
+
+- name: Configure NetApp E-Series storage system volume configuration
+ import_tasks: volume.yml
+ tags:
+ - volumes
+
+- name: Configure NetApp E-Series storage system disk pool configuration
+ import_tasks: storage_pool_absent.yml
+ tags:
+ - storage_pools
+
+- name: Configure storage system's hosts/hostgroups initiators
+ import_tasks: initiator.yml
+ tags:
+ - initiators
+
+- name: Map storage system's volumes to host objects
+ import_tasks: lun_mapping.yml
+ tags:
+ - lun_mappings
+
+- name: Configure controllers interfaces
+ import_tasks: interface.yml
+ tags:
+ - interface
+
+- name: Configure snapshot consistency groups
+ import_tasks: snapshot.yml
+ when: eseries_remove_all_configuration == False | default(True)
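For context, a minimal play sketch that drives this task order (the inventory group and role invocation mirror the collection's README examples; storage system credentials are resolved by the nar_santricity_common build_info.yml include when current_eseries_api_url is not already set):

    - hosts: eseries_storage_systems
      gather_facts: false
      collections:
        - netapp_eseries.santricity
      tasks:
        - name: Ensure NetApp E-Series storage system host objects are properly configured
          import_role:
            name: nar_santricity_host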
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot.yml
new file mode 100644
index 000000000..c99c2861b
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot.yml
@@ -0,0 +1,19 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} all snapshot consistency groups."
+ import_tasks: snapshot/group.yml
+ when: eseries_snapshot_groups | default(False) or eseries_snapshot_remove_unspecified == True
+ tags:
+ - snapshot_group
+
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} all snapshot consistency group views."
+ import_tasks: snapshot/view.yml
+ when: eseries_snapshot_views | default(False) or eseries_snapshot_remove_unspecified == True
+ tags:
+ - snapshot_view
+
+- name: Rollback base volumes to point-in-time snapshot images.
+ import_tasks: snapshot/rollback.yml
+ when: 'eseries_remove_all_configuration == False | default(True) and
+ (eseries_snapshot_rollbacks | default(False) or
+ eseries_snapshot_remove_unspecified == True)'
+ tags:
+ - snapshot_rollback
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/group.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/group.yml
new file mode 100644
index 000000000..473226f1d
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/group.yml
@@ -0,0 +1,75 @@
+- name: Determine if any snapshot consistency groups are no longer needed.
+ block:
+ - name: Retrieve all snapshot consistency groups
+ uri:
+ url: "{{ current_eseries_api_url }}storage-systems/{{ current_eseries_ssid }}/consistency-groups"
+ method: GET
+ url_password: "{{ current_eseries_api_password }}"
+ url_username: "{{ current_eseries_api_username }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: existing_consistency_groups
+
+ - name: Ensure all unspecified snapshot consistency groups are removed.
+ na_santricity_snapshot:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: absent
+ type: group
+ group_name: "{{ item }}"
+ connection: local
+ loop: "{{ unspecified_groups }}"
+ vars:
+ unspecified_groups: |-
+ {%- set unspecified_groups = [] -%}
+ {%- for existing_group in existing_consistency_groups["json"] -%}
+ {%- for group in eseries_snapshot_groups | default([]) if existing_group["name"] == group["name"] -%}
+ {%- else -%}
+ {%- if unspecified_groups.append(existing_group["name"]) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- unspecified_groups -}}
+ when: (eseries_remove_all_configuration | default(False)) == False and eseries_snapshot_remove_unspecified == True
+
+- name: "Ensure all snapshot consistency groups {{'exist' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'are removed' }}."
+ na_santricity_snapshot:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_remove_all_configuration_state | default('present') }}"
+ type: group
+ group_name: "{{ item['name'] }}"
+ maximum_snapshots: "{{ item['maximum_snapshots'] }}"
+ alert_threshold_pct: "{{ item['alert_threshold_pct'] }}"
+ reserve_capacity_full_policy: "{{ item['reserve_capacity_full_policy'] }}"
+ preferred_reserve_storage_pool: "{{ item['preferred_reserve_storage_pool'] }}"
+ rollback_priority: "{{ item['rollback_priority'] }}"
+ volumes: "{{ item['volumes'] }}"
+ connection: local
+ loop: "{{ consistency_groups }}"
+ vars:
+ consistency_groups: |-
+ {%- set consistency_groups = [] -%}
+ {%- for group in eseries_snapshot_groups | default([]) -%}
+ {%- set info = {"name": group["name"],
+ "maximum_snapshots": group["maximum_snapshots"] | default(eseries_snapshot_groups_maximum_snapshots | default(omit)),
+ "reserve_capacity_pct": group["reserve_capacity_pct"] | default(eseries_snapshot_groups_reserve_capacity_pct | default(omit)),
+ "preferred_reserve_storage_pool": group["preferred_reserve_storage_pool"] | default(eseries_snapshot_groups_preferred_reserve_storage_pool | default(omit)),
+ "reserve_capacity_full_policy": group["reserve_capacity_full_policy"] | default(eseries_snapshot_groups_reserve_capacity_full_policy | default(omit)),
+ "alert_threshold_pct": group["alert_threshold_pct"] | default(eseries_snapshot_groups_alert_threshold_pct | default(omit)),
+ "rollback_priority": group["rollback_priority"] | default(eseries_snapshot_rollback_priority | default(omit)),
+ "volumes": []} -%}
+ {%- for volume in group["volumes"] -%}
+ {%- set volume_info = {"volume": volume["volume"]} -%}
+ {%- if volume["reserve_capacity_pct"] | default(False) and volume_info.update({"reserve_capacity_pct": volume["reserve_capacity_pct"]}) -%}{%- endif -%}
+            {%- if volume["preferred_reserve_storage_pool"] | default(False) and volume_info.update({"preferred_reserve_storage_pool": volume["preferred_reserve_storage_pool"]}) -%}{%- endif -%}
+ {%- if info["volumes"].append(volume_info) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- if consistency_groups.append(info) -%}{%- endif -%}
+ {%- endfor -%}
+ {{- consistency_groups -}}
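A minimal sketch of eseries_snapshot_groups as consumed by the consistency_groups variable above (group, pool, and volume names and values are illustrative); when eseries_snapshot_remove_unspecified is true, existing consistency groups not listed here are also removed:

    eseries_snapshot_remove_unspecified: false
    eseries_snapshot_groups:
      - name: app_consistency_group
        maximum_snapshots: 32
        alert_threshold_pct: 75
        reserve_capacity_pct: 40
        preferred_reserve_storage_pool: pool_1
        rollback_priority: medium
        volumes:
          - volume: app_data
          - volume: app_logs
            reserve_capacity_pct: 60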
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/rollback.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/rollback.yml
new file mode 100644
index 000000000..45fae418e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/rollback.yml
@@ -0,0 +1,41 @@
+- name: Roll volumes back to expected point-in-time.
+ na_santricity_snapshot:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: rollback
+ group_name: "{{ item['group_name'] }}"
+ pit_name: "{{ item['pit_name'] | default(omit) }}"
+ pit_timestamp: "{{ item['pit_timestamp'] | default(omit) }}"
+ rollback_priority: "{{ item['rollback_priority'] | default(omit) }}"
+ rollback_backup: "{{ item['rollback_backup'] | default(omit) }}"
+ volumes: "{{ item['volumes'] | default(omit) }}"
+ connection: local
+ loop: "{{ consistency_group_rollbacks }}"
+ vars:
+ consistency_group_rollbacks: |-
+ {%- set consistency_group_rollbacks = [] -%}
+ {%- for rollback in eseries_snapshot_rollbacks | default([]) -%}
+ {%- set rollback_info = {"group_name": rollback["group_name"]} -%}
+ {%- if "pit_name" in (rollback.keys() | list) and rollback_info.update({"pit_name": rollback["pit_name"]}) -%}{%- endif -%}
+ {%- if "pit_timestamp" in (rollback.keys() | list) and rollback_info.update({"pit_timestamp": rollback["pit_timestamp"]}) -%}{%- endif -%}
+
+ {%- if "rollback_priority" in (rollback.keys() | list) or eseries_snapshot_rollback_priority is defined -%}
+ {%- if rollback_info.update({"rollback_priority": rollback["rollback_priority"] | default(eseries_snapshot_rollback_priority)}) -%}{%- endif -%}
+ {%- endif -%}
+ {%- if "rollback_backup" in (rollback.keys() | list) or eseries_snapshot_rollback_backup is defined -%}
+ {%- if rollback_info.update({"rollback_backup": rollback["rollback_backup"] | default(eseries_snapshot_rollback_backup)}) -%}{%- endif -%}
+ {%- endif -%}
+
+ {%- if "volumes" in (rollback.keys() | list) -%}
+ {%- if rollback_info.update({"volumes": []}) -%}{%- endif -%}
+ {%- for volume in rollback["volumes"] -%}
+ {%- if rollback_info["volumes"].append({"volume": volume}) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+
+ {%- if consistency_group_rollbacks.append(rollback_info) -%}{%- endif -%}
+ {%- endfor -%}
+ {{- consistency_group_rollbacks -}}
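A minimal sketch of eseries_snapshot_rollbacks (names and timestamp are illustrative); either pit_name or pit_timestamp selects the point-in-time image, and volumes limits the rollback to a subset of the group's base volumes:

    eseries_snapshot_rollbacks:
      - group_name: app_consistency_group
        pit_timestamp: "2024-01-01 00:00:00"    # or pit_name to select an image by name
        rollback_backup: true
        rollback_priority: high
        volumes:
          - app_data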
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/view.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/view.yml
new file mode 100644
index 000000000..f8de06c6c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/snapshot/view.yml
@@ -0,0 +1,116 @@
+- name: Determine if any snapshot consistency group views are not specified.
+ block:
+ - name: Retrieve all snapshot consistency groups
+ uri:
+ url: "{{ current_eseries_api_url }}storage-systems/{{ current_eseries_ssid }}/consistency-groups"
+ method: GET
+ url_password: "{{ current_eseries_api_password }}"
+ url_username: "{{ current_eseries_api_username }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: existing_consistency_groups
+
+ - name: Retrieve all snapshot consistency group views
+ uri:
+ url: "{{ current_eseries_api_url }}storage-systems/{{ current_eseries_ssid }}/consistency-groups/views"
+ method: GET
+ url_password: "{{ current_eseries_api_password }}"
+ url_username: "{{ current_eseries_api_username }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ connection: local
+ register: existing_consistency_group_views
+
+ - name: Ensure all unspecified snapshot consistency group views are removed.
+ na_santricity_snapshot:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: absent
+ type: view
+ group_name: "{{ item['group_name'] }}"
+ view_name: "{{ item['view_name'] }}"
+ connection: local
+ loop: "{{ unspecified_groups }}"
+ vars:
+ unspecified_groups: |-
+ {%- set unspecified_group_views = [] -%}
+ {%- for existing_view in existing_consistency_group_views["json"] -%}
+ {%- for view in eseries_snapshot_views | default([]) if existing_view["name"] == view["name"] -%}
+ {#- DO NOTHING -#}
+ {%- else -%}
+ {%- for group in existing_consistency_groups["json"] if group["id"] == existing_view["groupRef"] -%}
+ {%- if unspecified_group_views.append({"group_name": group["name"], "view_name": existing_view["name"]}) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- unspecified_group_views -}}
+ when: (eseries_remove_all_configuration | default(False)) == False and eseries_snapshot_remove_unspecified == True
+
+- name: "Ensure all snapshot consistency views {{'exist' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'are removed' }}."
+ na_santricity_snapshot:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_remove_all_configuration_state | default('present') }}"
+ type: view
+ view_name: "{{ item['name'] }}"
+ group_name: "{{ item['group_name'] }}"
+ pit_name: "{{ item['pit_name'] | default(omit) }}"
+ pit_timestamp: "{{ item['pit_timestamp'] | default(omit) }}"
+ volumes: "{{ item['volumes'] | default(omit) }}"
+ reserve_capacity_pct: "{{ item['reserve_capacity_pct'] | default(omit) }}"
+ preferred_reserve_storage_pool: "{{ item['preferred_reserve_storage_pool'] | default(omit) }}"
+ alert_threshold_pct: "{{ item['alert_threshold_pct'] | default(omit) }}"
+ view_host: "{{ item['host'] | default(omit) }}"
+ view_writable: "{{ item['writable'] | default(omit) }}"
+ view_validate: "{{ item['validate'] | default(omit) }}"
+ connection: local
+ loop: "{{ consistency_group_views }}"
+ vars:
+ consistency_group_views: |-
+ {%- set consistency_group_views = [] -%}
+ {%- for view in eseries_snapshot_views | default([]) -%}
+ {%- set view_info = {"name": view["name"], "group_name": view["group_name"]} -%}
+ {%- if "pit_name" in (view.keys() | list) and view_info.update({"pit_name": view["pit_name"]}) -%}{%- endif -%}
+ {%- if "pit_timestamp" in (view.keys() | list) and view_info.update({"pit_timestamp": view["pit_timestamp"]}) -%}{%- endif -%}
+
+ {%- if "host" in (view.keys() | list) or eseries_snapshot_views_host is defined -%}
+ {%- if view_info.update({"host": view["host"] | default(eseries_snapshot_views_host)}) -%}{%- endif -%}{%- endif -%}
+ {%- if "reserve_capacity_pct" in (view.keys() | list) or eseries_snapshot_views_reserve_capacity_pct is defined -%}
+ {%- if view_info.update({"reserve_capacity_pct": view["reserve_capacity_pct"] | default(eseries_snapshot_views_reserve_capacity_pct)}) -%}{%- endif -%}{%- endif -%}
+ {%- if "preferred_reserve_storage_pool" in (view.keys() | list) or eseries_snapshot_views_preferred_reserve_storage_pool is defined -%}
+ {%- if view_info.update({"preferred_reserve_storage_pool": view["preferred_reserve_storage_pool"] | default(eseries_snapshot_views_preferred_reserve_storage_pool)}) -%}{%- endif -%}{%- endif -%}
+ {%- if "alert_threshold_pct" in (view.keys() | list) or eseries_snapshot_views_alert_threshold_pct is defined -%}
+ {%- if view_info.update({"alert_threshold_pct": view["alert_threshold_pct"] | default(eseries_snapshot_views_alert_threshold_pct)}) -%}{%- endif -%}{%- endif -%}
+ {%- if "writable" in (view.keys() | list) or eseries_snapshot_views_writable is defined -%}
+ {%- if view_info.update({"writable": view["writable"] | default(eseries_snapshot_views_writable)}) -%}{%- endif -%}{%- endif -%}
+ {%- if "validate" in (view.keys() | list) or eseries_snapshot_views_validate is defined -%}
+ {%- if view_info.update({"validate": view["validate"] | default(eseries_snapshot_views_validate)}) -%}{%- endif -%}{%- endif -%}
+ {%- if "alert_threshold_pct" in (view.keys() | list) or eseries_snapshot_views_alert_threshold_pct is defined -%}
+ {%- if view_info.update({"alert_threshold_pct": view["alert_threshold_pct"] | default(eseries_snapshot_views_alert_threshold_pct)}) -%}{%- endif -%}{%- endif -%}
+
+ {%- if "volumes" in (view.keys() | list) -%}
+ {%- if view_info.update({"volumes": [] }) -%}{%- endif -%}
+ {%- for volume in view["volumes"] -%}
+ {%- set volume_info = {"volume": volume["volume"],
+ "reserve_capacity_pct": volume["reserve_capacity_pct"] | default(view_info["reserve_capacity_pct"] | default(False)),
+ "snapshot_volume_writable": volume["writable"] | default(view_info["writable"] | default(True)),
+ "snapshot_volume_validate": volume["validate"] | default(view_info["validate"] | default(False))} -%}
+
+ {%- if "host" in (volume.keys() | list) or "host" in (view_info.keys() | list) -%}
+ {%- if volume_info.update({"snapshot_volume_host": volume["host"] | default(view_info["host"])}) -%}{%- endif -%}{%- endif -%}
+ {%- if "lun" in (volume.keys() | list) -%}
+ {%- if volume_info.update({"snapshot_volume_lun": volume["lun"]}) -%}{%- endif -%}{%- endif -%}
+ {%- if "preferred_reserve_storage_pool" in (volume.keys() | list) or "preferred_reserve_storage_pool" in (view_info.keys() | list) -%}
+ {%- if volume_info.update({"preferred_reserve_storage_pool": volume["preferred_reserve_storage_pool"] | default(view_info["preferred_reserve_storage_pool"])}) -%}{%- endif -%}{%- endif -%}
+
+ {%- if view_info["volumes"].append(volume_info) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- if consistency_group_views.append(view_info) -%}{%- endif -%}
+ {%- endfor -%}
+ {{- consistency_group_views -}} \ No newline at end of file
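A minimal sketch of eseries_snapshot_views as consumed by the consistency_group_views variable above (names and values are illustrative); view-level settings such as host, writable, and reserve_capacity_pct become per-volume defaults unless overridden inside a volume entry:

    eseries_snapshot_views:
      - name: app_consistency_group_view
        group_name: app_consistency_group
        pit_timestamp: "2024-01-01 00:00:00"
        host: backup_server
        writable: false
        reserve_capacity_pct: 30
        volumes:
          - volume: app_data
            lun: 10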
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_absent.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_absent.yml
new file mode 100644
index 000000000..49e2c2d35
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_absent.yml
@@ -0,0 +1,27 @@
+- name: Unconfigure NetApp E-Series storage system disk pool configuration
+ na_santricity_storagepool:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] }}"
+ name: "{{ item['name'] }}"
+ raid_level: "{{ item['raid_level'] | default(eseries_storage_pool_raid_level | default(omit)) }}"
+ secure_pool: "{{ item['secure_pool'] | default(eseries_storage_pool_secure_pool | default(omit)) }}"
+ criteria_drive_count: "{{ item['criteria_drive_count'] | default(eseries_storage_pool_criteria_drive_count | default(omit)) }}"
+ reserve_drive_count: "{{ item['reserve_drive_count'] | default(eseries_storage_pool_reserve_drive_count | default(omit)) }}"
+ criteria_min_usable_capacity: "{{ item['criteria_min_usable_capacity'] | default(eseries_storage_pool_criteria_min_usable_capacity | default(omit)) }}"
+ criteria_drive_type: "{{ item['criteria_drive_type'] | default(eseries_storage_pool_criteria_drive_type | default(omit)) }}"
+ criteria_size_unit: "{{ item['criteria_size_unit'] | default(eseries_storage_pool_criteria_size_unit | default(omit)) }}"
+ criteria_drive_min_size: "{{ item['criteria_drive_min_size'] | default(eseries_storage_pool_criteria_drive_min_size | default(omit)) }}"
+ criteria_drive_max_size: "{{ item['criteria_drive_max_size'] | default(eseries_storage_pool_criteria_drive_max_size | default(omit)) }}"
+ criteria_drive_require_da: "{{ item['criteria_drive_require_da'] | default(eseries_storage_pool_criteria_drive_require_da | default(omit)) }}"
+ criteria_drive_require_fde: "{{ item['criteria_drive_require_fde'] | default(eseries_storage_pool_criteria_drive_require_fde | default(omit)) }}"
+ usable_drives: "{{ item['usable_drives'] | default(eseries_storage_pool_usable_drives | default(omit)) }}"
+ remove_volumes: "{{ item['remove_volumes'] | default(eseries_storage_pool_remove_volumes | default(omit)) }}"
+ erase_secured_drives: "{{ item['erase_secured_drives'] | default(eseries_storage_pool_erase_secured_drives | default(omit)) }}"
+ ddp_critical_threshold_pct: "{{ item['ddp_critical_threshold_pct'] | default(eseries_storage_pool_ddp_critical_threshold_pct | default(omit)) }}"
+ ddp_warning_threshold_pct: "{{ item['ddp_warning_threshold_pct'] | default(eseries_storage_pool_ddp_warning_threshold_pct | default(omit)) }}"
+ connection: local
+ loop: "{{ query('netapp_eseries.santricity.santricity_storage_pool', hostvars[inventory_hostname], state='absent') }}"
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_present.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_present.yml
new file mode 100644
index 000000000..d90430ab4
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/storage_pool_present.yml
@@ -0,0 +1,28 @@
+- name: Configure NetApp E-Series storage system disk pool configuration
+ na_santricity_storagepool:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] }}"
+ name: "{{ item['name'] }}"
+ raid_level: "{{ item['raid_level'] | default(eseries_storage_pool_raid_level | default(omit)) }}"
+ secure_pool: "{{ item['secure_pool'] | default(eseries_storage_pool_secure_pool | default(omit)) }}"
+ criteria_drive_count: "{{ item['criteria_drive_count'] | default(eseries_storage_pool_criteria_drive_count | default(omit)) }}"
+ reserve_drive_count: "{{ item['reserve_drive_count'] | default(eseries_storage_pool_reserve_drive_count | default(omit)) }}"
+ criteria_min_usable_capacity: "{{ item['criteria_min_usable_capacity'] | default(eseries_storage_pool_criteria_min_usable_capacity | default(omit)) }}"
+ criteria_drive_type: "{{ item['criteria_drive_type'] | default(eseries_storage_pool_criteria_drive_type | default(omit)) }}"
+ criteria_drive_interface_type: "{{ item['criteria_drive_interface_type'] | default(eseries_storage_pool_criteria_drive_interface_type | default(omit)) }}"
+ criteria_size_unit: "{{ item['criteria_size_unit'] | default(eseries_storage_pool_criteria_size_unit | default(omit)) }}"
+ criteria_drive_min_size: "{{ item['criteria_drive_min_size'] | default(eseries_storage_pool_criteria_drive_min_size | default(omit)) }}"
+ criteria_drive_max_size: "{{ item['criteria_drive_max_size'] | default(eseries_storage_pool_criteria_drive_max_size | default(omit)) }}"
+ criteria_drive_require_da: "{{ item['criteria_drive_require_da'] | default(eseries_storage_pool_criteria_drive_require_da | default(omit)) }}"
+ criteria_drive_require_fde: "{{ item['criteria_drive_require_fde'] | default(eseries_storage_pool_criteria_drive_require_fde | default(omit)) }}"
+ usable_drives: "{{ item['usable_drives'] | default(eseries_storage_pool_usable_drives | default(omit)) }}"
+ remove_volumes: "{{ item['remove_volumes'] | default(eseries_storage_pool_remove_volumes | default(omit)) }}"
+ erase_secured_drives: "{{ item['erase_secured_drives'] | default(eseries_storage_pool_erase_secured_drives | default(omit)) }}"
+ ddp_critical_threshold_pct: "{{ item['ddp_critical_threshold_pct'] | default(eseries_storage_pool_ddp_critical_threshold_pct | default(omit)) }}"
+ ddp_warning_threshold_pct: "{{ item['ddp_warning_threshold_pct'] | default(eseries_storage_pool_ddp_warning_threshold_pct | default(omit)) }}"
+ connection: local
+ loop: "{{ query('netapp_eseries.santricity.santricity_storage_pool', hostvars[inventory_hostname], state='present') }}"
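A minimal sketch of one eseries_storage_pool_configuration entry as expanded by the santricity_storage_pool lookup above (pool name, criteria, and volume sizes are illustrative; keys mirror the module options forwarded by this task, and nested volumes are handled by the volume tasks that follow):

    eseries_storage_pool_configuration:
      - name: pool_1
        raid_level: raidDiskPool
        criteria_drive_count: 12
        criteria_drive_type: ssd
        volumes:
          - name: app_data
            size: 2
            size_unit: tb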
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/volume.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/volume.yml
new file mode 100644
index 000000000..86a32ac29
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/tasks/volume.yml
@@ -0,0 +1,34 @@
+- name: "{{'Configure' if (eseries_remove_all_configuration_state | default('present')) == 'present' else 'Unconfigure' }} Netapp E-Series storage system disk pool volume configuration"
+ na_santricity_volume:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ item['state'] }}"
+ name: "{{ item['name'] }}"
+ storage_pool_name: "{{ item['storage_pool_name'] }}"
+ size: "{{ item['size'] | default(eseries_volume_size | default(omit)) }}"
+ size_unit: "{{ item['size_unit'] | default(eseries_volume_size_unit | default(omit)) }}"
+ segment_size_kb: "{{ item['segment_size_kb'] | default(eseries_volume_segment_size_kb | default(omit)) }}"
+ owning_controller: "{{ item['owning_controller'] | default(eseries_volume_owning_controller | default(omit)) }}"
+ thin_provision: "{{ item['thin_provision'] | default(eseries_volume_thin_provision | default(omit)) }}"
+ thin_volume_repo_size: "{{ item['thin_volume_repo_size'] | default(eseries_volume_thin_volume_repo_size | default(omit)) }}"
+ thin_volume_max_repo_size: "{{ item['thin_volume_max_repo_size'] | default(eseries_volume_thin_volume_max_repo_size | default(omit)) }}"
+ thin_volume_expansion_policy: "{{ item['thin_volume_expansion_policy'] | default(eseries_volume_thin_volume_expansion_policy | default(omit)) }}"
+ thin_volume_growth_alert_threshold: "{{ item['thin_volume_growth_alert_threshold'] | default(eseries_volume_thin_volume_growth_alert_threshold | default(omit)) }}"
+ ssd_cache_enabled: "{{ item['ssd_cache_enabled'] | default(eseries_volume_ssd_cache_enabled | default(omit)) }}"
+ data_assurance_enabled: "{{ item['data_assurance_enabled'] | default(eseries_volume_data_assurance_enabled | default(omit)) }}"
+ read_cache_enable: "{{ item['read_cache_enable'] | default(eseries_volume_read_cache_enable | default(omit)) }}"
+ read_ahead_enable: "{{ item['read_ahead_enable'] | default(eseries_volume_read_ahead_enable | default(omit)) }}"
+ write_cache_enable: "{{ item['write_cache_enable'] | default(eseries_volume_write_cache_enable | default(omit)) }}"
+ write_cache_mirror_enable: "{{ item['write_cache_mirror_enable'] | default(eseries_volume_write_cache_mirror_enable | default(omit)) }}"
+ cache_without_batteries: "{{ item['cache_without_batteries'] | default(eseries_volume_cache_without_batteries | default(omit)) }}"
+ allow_expansion: "{{ item['allow_expansion'] | default(eseries_volume_allow_expansion | default(omit)) }}"
+ wait_for_initialization: "{{ item['wait_for_initialization'] | default(eseries_volume_wait_for_initialization | default(omit)) }}"
+ workload_name: "{{ item['workload_name'] | default(eseries_volume_workload_name | default(omit)) }}"
+ workload_metadata: "{{ item['workload_metadata'] | default(item['metadata'] | default(eseries_volume_workload_metadata | default(eseries_volume_metadata | default(omit)))) }}"
+ volume_metadata: "{{ item['volume_metadata'] | default(eseries_volume_volume_metadata | default(omit)) }}"
+ connection: local
+ when: eseries_storage_pool_configuration is defined
+ loop: "{{ query('netapp_eseries.santricity.santricity_volume', hostvars[inventory_hostname]) }}"
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/hostnqn.j2 b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/hostnqn.j2
new file mode 100644
index 000000000..90478d074
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/hostnqn.j2
@@ -0,0 +1 @@
+{{ item["stdout_lines"][0] }}
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/initiatorname_iscsi.j2 b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/initiatorname_iscsi.j2
new file mode 100644
index 000000000..f6c3740eb
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_host/templates/initiatorname_iscsi.j2
@@ -0,0 +1,11 @@
+##
+## /etc/iscsi/initiatorname.iscsi
+##
+## Default iSCSI Initiatorname.
+##
+## DO NOT EDIT OR REMOVE THIS FILE!
+## If you remove this file, the iSCSI daemon will not start.
+## If you change the InitiatorName, existing access control lists
+## may reject this initiator. The InitiatorName must be unique
+## for each iSCSI initiator. Do NOT duplicate iSCSI InitiatorNames.
+InitiatorName={{ item["stdout_lines"][0] }}
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/.travis.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/.travis.yml
new file mode 100644
index 000000000..36bbf6208
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/.travis.yml
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+ apt:
+ packages:
+ - python-pip
+
+install:
+ # Install ansible
+ - pip install ansible
+
+ # Check ansible version
+ - ansible --version
+
+ # Create ansible.cfg with correct roles_path
+ - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+ # Basic role syntax check
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/README.md b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/README.md
new file mode 100644
index 000000000..d5b454c96
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/README.md
@@ -0,0 +1,301 @@
+nar_santricity_management
+=========
+ Manages NetApp E-Series storage system's name, passwords, management interfaces, alerts, syslog, auditlog, asup, ldap, certificates, drive firmware and controller firmware.
+
+Requirements
+------------
+ - NetApp E-Series E2800 platform or newer, or NetApp E-Series SANtricity Web Services Proxy configured for older E-Series storage systems.
+
+Tested Ansible Versions
+-----------------------
+ - Ansible 5.x (ansible-core 2.12)
+
+Example Playbook
+----------------
+ - hosts: eseries_storage_systems
+ gather_facts: false
+ collections:
+ - netapp_eseries.santricity
+ tasks:
+ - name: Ensure NetApp E-Series storage system is properly configured
+ import_role:
+ name: nar_santricity_management
+
+Example Storage System Inventory File (Discover storage system with proxy)
+-------------------------------------
+ eseries_system_serial: "012345678901" # Be sure to quote if the serial is all numbers and begins with zero.
+ eseries_system_password: admin_password
+ eseries_proxy_api_url: https://192.168.1.100:8443/devmgr/v2/
+ eseries_proxy_api_password: admin_password
+ eseries_subnet: 192.168.1.0/24
+ eseries_prefer_embedded: false # Overrides the default behavior of using Web Services Proxy when eseries_proxy_api_url is defined. This will only affect storage systems that have Embedded Web Services.
+ eseries_validate_certs: false
+
+ eseries_system_name: my_eseries_array
+ eseries_system_cache_block_size: 128
+ eseries_system_cache_flush_threshold: 90
+ eseries_system_autoload_balance: enabled
+ eseries_system_host_connectivity_reporting: enabled
+ eseries_system_default_host_type: Linux DM-MP
+
+ eseries_management_interfaces:
+ config_method: static
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ dns_config_method: static
+ dns_address: 192.168.1.253
+ dns_address_backup: 192.168.1.254
+ ssh: true
+ ntp_config_method: static
+ ntp_address: 192.168.1.200
+ ntp_address_backup: 192.168.1.201
+ controller_a:
+ - address: 192.168.1.100
+ - address: 192.168.1.101
+ controller_b:
+ - address: 192.168.1.102
+ - address: 192.168.1.103
+
+ eseries_ldap_state: present
+ eseries_ldap_bind_username:
+ eseries_ldap_bind_password:
+ eseries_ldap_server:
+ eseries_ldap_search_base:
+ eseries_ldap_role_mappings:
+ ".*":
+ - storage.admin
+ - storage.monitor
+ - support.admin
+ - security.admin
+
+ eseries_client_certificate_certificates:
+ - /path/to/client_certificate.crt
+ eseries_server_certificate:
+ controller_a:
+ public_certificate: "/path/to/controller_a_server_certificate_bundle.pem"
+ controller_b:
+ public_certificate: "/path/to/controller_b_server_certificate_bundle.pem"
+
+ eseries_firmware_firmware: "/path/to/firmware.dlp"
+ eseries_firmware_nvsram: "/path/to/nvsram.dlp"
+ eseries_drive_firmware_firmware_list:
+ - "/path/to/drive_firmware.dlp"
+
+ eseries_asup_state: enabled
+ eseries_asup_active: true
+ eseries_asup_days: [sunday, saturday]
+ eseries_asup_start: 17
+ eseries_asup_end: 24
+ eseries_asup_validate: false
+ eseries_asup_method: email
+ eseries_asup_email:
+ server: smtp.example.com
+ sender: noreply@example.com
+
+ eseries_syslog_state: present
+ eseries_syslog_address: 192.168.1.150
+ eseries_syslog_protocol: udp
+ eseries_syslog_port: 514
+ eseries_alert_syslog_servers:
+ - "address": 192.168.1.150
+ "port": 514
+
+Example Storage System Inventory File (Without storage system discovery)
+-------------------------------------
+ eseries_system_api_url: https://192.168.1.200:8443/devmgr/v2/
+ eseries_system_password: admin_password
+ eseries_validate_certs: false
+
+ (...) # Same as the previous example
+
+Role Variables
+--------------
+**Note that when values are specified below, they indicate the default value.**
+
+ # Web Services Embedded information
+ eseries_subnet: # Network subnet to search for the storage system specified in CIDR form. Example: 192.168.1.0/24
+ eseries_system_serial: # Storage system serial number. Be sure to quote if the serial is all numbers and begins with zero. (This is located on a label at the top-left toward the front of the device.)
+ eseries_system_addresses: # Storage system management IP addresses. Only required when neither eseries_system_serial nor eseries_system_api_url is defined. When not specified, addresses will be populated with eseries_management_interfaces controller addresses.
+ eseries_system_api_url: # URL for the storage system's embedded web services REST API. Example: https://192.168.10.100/devmgr/v2
+ eseries_system_username: admin # Username for the storage system's embedded web services REST API
+ eseries_system_password: # Password for the storage system's embedded web services REST API. When the admin password has not been set, eseries_system_password will be used to set it.
+ eseries_system_old_password: # Previous admin password. This is used to change the current admin password by setting this variable to the current
+ # password and eseries_system_password to the new password.
+ eseries_proxy_ssid: # Arbitrary string for the proxy to represent the storage system. eseries_system_serial will be used when not defined.
+ eseries_template_api_url: # Template for the web services api url. Default: https://0.0.0.0:8443/devmgr/v2/
+ eseries_prefer_embedded: false # Overrides the default behavior of using Web Services Proxy when eseries_proxy_api_url is defined. This will only affect storage systems that have Embedded Web Services.
+ eseries_validate_certs: true # Indicates whether SSL certificates should be verified. Used for both embedded and proxy. Choices: true, false
+
+ # Web Services Proxy information
+ Note: eseries_proxy_* variables are required to discover storage systems prior to SANtricity OS version 11.60.2.
+ eseries_proxy_api_url: # URL for the storage system's proxy web services REST API. Example: https://192.168.10.100/devmgr/v2
+ eseries_proxy_api_username: # Username for the storage system's proxy web services REST API.
+ eseries_proxy_api_password: # Password for the storage system's proxy web services REST API. When the admin password has not been set,
+ # eseries_proxy_api_password will be used to set it.
+
+ # Global storage system information
+ eseries_system_name: # Name of the storage system.
+ eseries_system_cache_block_size: # Cache block size
+ eseries_system_cache_flush_threshold: # Unwritten data will be flushed when it exceeds this threshold
+ eseries_system_autoload_balance: # Whether automatic load balancing should be enabled. Choices: enabled, disabled
+ eseries_system_host_connectivity_reporting: # Whether host connectivity reporting should be enabled. Choices: enabled, disabled
+ eseries_system_login_banner_message: # Message that appears prior to the login.
+ eseries_system_controller_shelf_id: # Controller shelf identifier.
+ eseries_system_default_host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or when the storage system default host type is incorrect. Common definitions below:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
+
+ # Role-based username passwords
+ eseries_system_monitor_password: # Storage system monitor username password
+ eseries_system_security_password: # Storage system security username password
+ eseries_system_storage_password: # Storage system storage username password
+ eseries_system_support_password: # Storage system support username password
+
+ # SSL/TLS certificate configurations
+ eseries_client_certificate_common_certificates: # List of common client certificate file paths. These files will be appended to each client certificate list.
+ eseries_client_certificate_certificates: # List of client certificate file paths
+ eseries_server_certificate_common_certificates: # List of common server certificates. These files will be appended to each controller's server certificate list.
+ eseries_server_certificate_common_passphrase: # Common passphrase for decrypting PEM (PKCS8) private key.
+ eseries_server_certificate:
+ controller_a:
+ certificates: # List of server certificates for the storage system's controller A. Leave blank to use a self-signed certificate.
+ passphrase: # Passphrase for decrypting PEM (PKCS8) private key.
+ controller_b:
+ certificates: # List of server certificates for the storage system's controller B. Leave blank to use a self-signed certificate.
+ passphrase: # Passphrase for decrypting PEM (PKCS8) private key.
+
+ # Storage management interface defaults
+ Note: eseries_management_* variables have the lowest priority and will be overwritten by those found in eseries_management_interfaces; use these to define host group defaults.
+ eseries_management_config_method: # Default config method for all management interfaces. Choices: static, dhcp
+ eseries_management_subnet_mask: # Default subnet mask for all management interfaces
+ eseries_management_gateway: # Default gateway for all management interfaces
+ eseries_management_dns_config_method: # Default DNS config method for all management interfaces
+ eseries_management_dns_address: # Default primary DNS address for all management interfaces
+ eseries_management_dns_address_backup: # Default backup DNS address for all management interfaces
+ eseries_management_ntp_config_method: # Default NTP config method for all management interfaces
+ eseries_management_ntp_address: # Default primary NTP address for all management interfaces
+ eseries_management_ntp_address_backup: # Default backup NTP address for all management interfaces
+ eseries_management_ssh: # Default SSH access for all management interfaces. Choices: true, false
+ eseries_management_interfaces:
+ config_method: # Config method for all management interfaces. Choices: static, dhcp
+ subnet_mask: # Subnet mask for all management interfaces
+ gateway: # Gateway for all management interfaces
+ dns_config_method: # DNS config method for all management interfaces
+ dns_address: # Primary DNS address for all management interfaces
+ dns_address_backup: # Backup DNS address for all management interfaces
+ ntp_config_method: # NTP config method for all management interfaces
+ ntp_address: # Primary NTP address for all management interfaces
+ ntp_address_backup: # Backup NTP address for all management interfaces
+ ssh: # SSH access for all management interfaces. Choices: true, false
+ controller_a: # List of controller A ports
+ - address: # IPv4 address for controller A
+ config_method: # Config method for controller A. Choices: static, dhcp
+ subnet_mask: # Subnet mask for controller A
+ gateway: # Gateway for controller A
+ dns_config_method: # DNS config method for controller A
+ dns_address: # Primary DNS address for controller A
+ dns_address_backup: # Backup DNS address for controller A
+ ntp_config_method: # NTP config method for controller A
+ ntp_address: # Primary NTP address for controller A
+ ntp_address_backup: # Backup NTP address for controller A
+ ssh: # SSH access for controller A. Choices: true, false
+ controller_b: # List of controller B ports
+ - (...) # Same as for controller A but for controller B.
+
+ # Alerts configuration defaults
+ eseries_alerts_state: # Whether to enable storage system alerts. Choices: enabled, disabled
+ eseries_alerts_contact: # This allows the owner to specify free-form contact information such as an email address or phone number.
+ eseries_alerts_recipients: # List of e-mail addresses that should be sent notifications when alerts are issued.
+ eseries_alerts_sender: # Sender email address. This does not necessarily need to be a valid e-mail address.
+ eseries_alerts_server: # Fully qualified domain name, IPv4 address, or IPv6 address of the mail server.
+ eseries_alerts_test: false # When changes are made to the storage system alert configuration a test e-mail will be sent. Choices: true, false
+ eseries_alert_syslog_servers: # List of dictionaries where each dictionary contains a syslog server entry. [{"address": <syslog_address>, "port": 514}]
+ eseries_alert_syslog_test: false # When changes are made to the alerts syslog servers configuration a test message will be sent to them. Choices: true, false
+
+ # LDAP configuration defaults
+ eseries_ldap_state: # Whether LDAP should be configured
+ eseries_ldap_identifier: # The user attributes that should be considered for the group to role mapping
+ eseries_ldap_user_attribute: # Attribute used to match the provided username during authentication.
+ eseries_ldap_bind_username: # User account that will be used for querying the LDAP server.
+ eseries_ldap_bind_password: # Password for the bind user account
+ eseries_ldap_server: # LDAP server URL.
+ eseries_ldap_search_base: # Search base used to find the user's group membership
+ eseries_ldap_role_mappings: # Dictionary of user groups, each containing the list of access roles.
+ # Role choices: storage.admin - allows users full read/write access to storage objects and operations.
+ # storage.monitor - allows users read-only access to storage objects and operations.
+ # support.admin - allows users access to hardware, diagnostic information, major event logs,
+ # and other critical support-related functionality, but not the storage configuration.
+ # security.admin - allows users access to authentication/authorization configuration, as
+ # well as the audit log configuration, and certificate management.
+
+ # Drive firmware defaults
+ eseries_drive_firmware_firmware_list: # Local path list for drive firmware.
+ eseries_drive_firmware_wait_for_completion: # Forces drive firmware upgrades to wait for all associated tasks to complete. Choices: true, false
+ eseries_drive_firmware_ignore_inaccessible_drives: # Forces drive firmware upgrades to ignore any inaccessible drives. Choices: true, false
+ eseries_drive_firmware_upgrade_drives_online: # Forces drive firmware upgrades to be performed while I/Os are accepted. Choices: true, false
+
+ # Controller firmware defaults
+ eseries_firmware_nvsram: # Local path for NVSRAM file.
+ eseries_firmware_firmware: # Local path for controller firmware file.
+ eseries_firmware_wait_for_completion: # Forces controller firmware upgrade to wait until upgrade has completed before continuing. Choices: true, false
+ eseries_firmware_ignore_mel_events: # Forces firmware upgrade to be attempted regardless of the health check results. Choices: true, false
+
+ # Auto-Support configuration defaults
+ eseries_asup_state: # Whether auto support (ASUP) should be enabled. Choices: enabled, disabled
+ eseries_asup_active: # Enables active monitoring which allows NetApp support personnel to request support data to resolve issues. Choices: true, false
+ eseries_asup_days: # List of days of the week. Choices: monday, tuesday, wednesday, thursday, friday, saturday, sunday
+ eseries_asup_start: # Hour of the day(s) to start ASUP bundle transmissions. Start time must be less than end time. Choices: 0-23
+ eseries_asup_end: # Hour of the day(s) to end ASUP bundle transmissions. Start time must be less than end time. Choices: 1-24
+ eseries_asup_method: # ASUP delivery method. Choices: https, http, email (default: https)
+ eseries_asup_routing_type: # ASUP delivery routing type for https or http. Choices: direct, proxy, script (default: direct)
+ eseries_asup_proxy: # ASUP proxy delivery method information.
+ host: # ASUP proxy host IP address or FQDN. When eseries_asup_routing_type==proxy this must be specified.
+ port: # ASUP proxy host port. When eseries_asup_routing_type==proxy this must be specified.
+ username: # ASUP proxy username.
+ password: # ASUP proxy password.
+ script: # ASUP proxy host script.
+ eseries_asup_email: # ASUP email delivery configuration information
+ server: # ASUP email server
+ sender: # ASUP email sender
+ test_recipient: # ASUP configuration mail test recipient
+ eseries_maintenance_duration: # Duration in hours (1-72) the ASUP maintenance mode will be active
+ eseries_maintenance_emails: # List of email addresses for maintenance notifications
+ eseries_asup_validate: # Verify ASUP configuration prior to applying changes
+
+ # Audit-log configuration defaults
+ eseries_auditlog_enforce_policy: # Whether to make audit-log policy changes. Choices: true, false
+ eseries_auditlog_force: # Forces audit-log to delete log messages when the fullness threshold has been exceeded. Applicable when eseries_auditlog_full_policy=preventSystemAccess. Choices: true, false
+ eseries_auditlog_full_policy: # Policy for what to do when the record limit has been reached. Choices: overWrite, preventSystemAccess
+ eseries_auditlog_log_level: # Filters logs based on the specified level. Choices: all, writeOnly
+ eseries_auditlog_max_records: # Maximum number of audit-log messages retained. Choices: 100-50000.
+ eseries_auditlog_threshold: # Memory full percentage threshold at which audit-log will start issuing warning messages. Choices: 60-90
+
+ # Syslog configuration defaults
+ eseries_syslog_state: # Whether syslog servers should be added or removed from storage system. Choices: present, absent
+ eseries_syslog_address: # Syslog server IPv4 address or fully qualified hostname.
+ eseries_syslog_test: # Whether a test message should be sent to the syslog server when added to the storage system. Choices: true, false
+ eseries_syslog_protocol: # Protocol to be used when transmitting log messages to the syslog server. Choices: udp, tcp, tls
+ eseries_syslog_port: # Port to be used when transmitting log messages to the syslog server.
+ eseries_syslog_components: # List of components to log to the syslog server. Choices: auditLog, (others may become available)
+
+License
+-------
+ BSD-3-Clause
+
+Author Information
+------------------
+ Nathan Swartz (@ndswartz)
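To make the precedence note in the Role Variables section concrete: the eseries_management_* variables act as group-wide fallbacks, while eseries_management_interfaces (and its per-port entries) override them. A short, hedged example layout (file names and addresses are illustrative):

    # group_vars/eseries_storage_systems.yml -- lowest-priority defaults
    eseries_management_config_method: dhcp
    eseries_management_ssh: false

    # host_vars/my_eseries_array.yml -- overrides the group defaults for this system
    eseries_management_interfaces:
      config_method: static
      subnet_mask: 255.255.255.0
      ssh: true
      controller_a:
        - address: 192.168.1.100                # per-port entry, highest priority
          config_method: static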
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/defaults/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/defaults/main.yml
new file mode 100644
index 000000000..9e99c7d2f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/defaults/main.yml
@@ -0,0 +1,197 @@
+# Storage system specific credentials
+# -----------------------------------
+eseries_system_username: admin # Storage system username. Default: admin
+#eseries_system_password: # Storage system admin password.
+#eseries_validate_certs: # Whether the SSL certificates should be verified. (boolean)
+#eseries_system_subnet: # IPv4 search range for discovering E-Series storage. Must be in CIDR form.
+#eseries_system_serial: # Storage system chassis serial number. This is used to automatically discover the system.
+#eseries_system_addresses: # (list) Controller address(es) for the storage system. Only applicable for proxy web services.
+#eseries_system_tags: # Meta tags to associate to the storage system. Only applicable for proxy web services.
+#eseries_system_password: # Required when adding storage systems to SANtricity Web Services Proxy.
+#eseries_system_minimum_password_length: # Minimum required password length
+
+# SANtricity Web Services Proxy specific variables
+# ------------------------------------------------
+#eseries_proxy_ssid: # Storage array identifier. This value will be 1 when interacting with the embedded web services.
+#eseries_proxy_api_url: # Url for the web services proxy rest api. Example: https://192.168.10.100:8443/devmgr/v2
+#eseries_proxy_api_username: # Username for the web services proxy rest api.
+#eseries_proxy_api_password: # Password for the web services proxy rest api.
+#eseries_proxy_current_api_password: # This is for changing the password for the proxy.
+#eseries_proxy_api_validate_certs: # Whether the SSL certificates should be verified. (boolean)
+#eseries_proxy_minimum_password_length: # Minimum required proxy password length
+
+#eseries_proxy_discovery_subnet: # IPv4 search range for discovering E-Series storage. Must be in CIDR form.
+#eseries_proxy_accept_certifications: # Force automatic acceptance of all storage systems' certificates
+#eseries_proxy_default_system_tags: # Default meta tags to associate with all storage systems
+#eseries_proxy_default_password: # Default password to associate with all storage systems
+#eseries_proxy_systems: # List of storage system information which defines which systems should be added to proxy web services.
+ # Automatically populated from storage system's inventory when not defined.
+ # See na_santricity_proxy_systems for more details.
+
+# Storage proxy non-admin passwords
+# ---------------------------------
+#eseries_proxy_monitor_password: # Proxy monitor username password
+#eseries_proxy_security_password: # Proxy security username password
+#eseries_proxy_storage_password: # Proxy storage username password
+#eseries_proxy_support_password: # Proxy support username password
+
+# Storage system non-admin passwords
+# ----------------------------------
+#eseries_system_monitor_password: # Storage system monitor username password
+#eseries_system_security_password: # Storage system security username password
+#eseries_system_storage_password: # Storage system storage username password
+#eseries_system_support_password: # Storage system support username password
+
+# Storage system defaults
+# -----------------------------
+#eseries_system_name: # Name of the storage system.
+#eseries_system_cache_block_size: # Cache block size
+#eseries_system_cache_flush_threshold: # Unwritten data will be flushed when exceeds this threshold
+#eseries_system_autoload_balance: # Whether automatic load balancing should be enabled. Choices: enabled, disabled
+#eseries_system_host_connectivity_reporting: # Whether host connectivity reporting should be enabled. Choices: enabled, disabled
+#eseries_system_login_banner_message: # Message that appears prior to the login.
+#eseries_system_default_host_type: # Only required when using something other than Linux kernel 3.10 or later with DM-MP (Linux DM-MP),
+ # non-clustered Windows (Windows), or when the storage system default host type is incorrect. Common definitions below:
+ # - AIX MPIO: The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ # - AVT 4M: Silicon Graphics, Inc. (SGI) proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - HP-UX: The HP-UX OS with native multipath driver
+ # - Linux ATTO: The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ # - Linux DM-MP: The Linux OS and the native DM-MP driver
+ # - Linux Pathmanager: The Linux OS and the SGI proprietary multipath driver; refer to the SGI installation documentation for more information
+ # - Mac: The Mac OS and the ATTO Technology, Inc. driver
+ # - ONTAP: FlexArray
+ # - Solaris 11 or later: The Solaris 11 or later OS and the native MPxIO driver
+ # - Solaris 10 or earlier: The Solaris 10 or earlier OS and the native MPxIO driver
+ # - SVC: IBM SAN Volume Controller
+ # - VMware: ESXi OS
+ # - Windows: Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows Clustered: Clustered Windows Server OS and Windows MPIO with a DSM driver
+ # - Windows ATTO: Windows OS and the ATTO Technology, Inc. driver
+
+# Storage system SSL certificates
+# -------------------------------
+eseries_client_certificate_remove_unspecified_user_certificates: True # Whether existing user certificates should be automatically removed.
+#eseries_client_certificate_certificates: # Dictionary containing the SSL certificate file paths. The key will be used as the alias.
+#eseries_client_certificate_absent_certificates: # List of aliases to remove from the storage array's trust store.
+
+# Storage management interface defaults
+# -------------------------------------
+#eseries_management_config_method:
+#eseries_management_subnet_mask:
+#eseries_management_gateway:
+#eseries_management_dns_config_method:
+#eseries_management_dns_address:
+#eseries_management_dns_address_backup:
+#eseries_management_ntp_config_method:
+#eseries_management_ntp_address:
+#eseries_management_ntp_address_backup:
+#eseries_management_ssh:
+#eseries_management_interfaces:
+# config_method:
+# subnet_mask:
+# gateway:
+# dns_config_method:
+# dns_address:
+# dns_address_backup:
+# ntp_config_method:
+# ntp_address:
+# ntp_address_backup:
+# ssh:
+# controller_a:
+# config_method:
+# address:
+# subnet_mask:
+# gateway:
+# dns_config_method:
+# dns_address:
+# dns_address_backup:
+# ntp_config_method:
+# ntp_address:
+# ntp_address_backup:
+# ssh:
+# - (...)
+# controller_b:
+# - (...)
+# - (...)
+
+# Alerts configuration defaults
+# -----------------------------
+#eseries_alerts_state: # Whether to enable storage system alerts. Choices: enabled, disabled
+#eseries_alerts_contact: # This allows the owner to specify free-form contact information such as an email address or phone number.
+#eseries_alerts_recipients: # List of e-mail addresses that should be sent notifications when alerts are issued.
+#eseries_alerts_sender: # Sender email address. This does not necessarily need to be a valid e-mail address.
+#eseries_alerts_server: # Fully qualified domain name, IPv4 address, or IPv6 address of the mail server.
+#eseries_alert_syslog_servers: # List of dictionaries where each dictionary contains a syslog server entry. [{"address": <syslog_address>, "port": 514}]
+eseries_alerts_test: false # When changes are made to the storage system alert configuration a test e-mail will be sent. Choices: true, false
+eseries_alert_syslog_test: false # When changes are made to the alerts syslog servers configuration a test message will be sent to them. Choices: true, false
+
+# LDAP configuration defaults
+# ---------------------------
+#eseries_ldap_state: # Whether LDAP should be configured
+#eseries_ldap_identifier: memberOf # The user attributes that should be considered for the group to role mapping
+#eseries_ldap_user_attribute: sAMAccountName # Attribute used to match the provided username during authentication.
+#eseries_ldap_bind_username: # User account that will be used for querying the LDAP server.
+#eseries_ldap_bind_password: # Password for the bind user account
+#eseries_ldap_server: # LDAP server URL.
+#eseries_ldap_search_base: # Search base used to find the user's group membership
+#eseries_ldap_role_mappings: # Dictionary of user groups, each containing the list of access roles.
+ # Role choices: storage.admin - allows users full read/write access to storage objects and operations.
+ # storage.monitor - allows users read-only access to storage objects and operations.
+ # support.admin - allows users access to hardware, diagnostic information, major event logs, and
+ # other critical support-related functionality, but not the storage configuration.
+ # security.admin - allows users access to authentication/authorization configuration, as well as
+ # the audit log configuration, and certificate management.
+
+# Drive firmware defaults
+# -----------------------
+#eseries_drive_firmware_firmware_list: # Local path list for drive firmware.
+eseries_drive_firmware_wait_for_completion: true # Forces drive firmware upgrades to wait for all associated tasks to complete. Choices: true, false
+eseries_drive_firmware_ignore_inaccessible_drives: false # Forces drive firmware upgrades to ignore any inaccessible drives. Choices: true, false
+eseries_drive_firmware_upgrade_drives_online: true # Forces drive firmware upgrades to be performed while I/Os are accepted. Choices: true, false
+
+# Controller firmware defaults
+# ----------------------------
+#eseries_firmware_nvsram: # Local path for NVSRAM file.
+#eseries_firmware_firmware: # Local path for controller firmware file.
+eseries_firmware_wait_for_completion: true # Forces controller firmware upgrade to wait until upgrade has completed before continuing. Choices: true, false
+eseries_firmware_ignore_mel_events: false # Forces firmware upgrade to be attempted regardless of the health check results. Choices: true, false
+
+# ASUP configuration defaults
+# ---------------------------
+#eseries_asup_state: # Whether auto support (ASUP) should be enabled. Choices: enabled, disabled
+eseries_asup_active: true # Enables active monitoring which allows NetApp support personnel to request support data to resolve issues. Choices: true, false
+#eseries_asup_days: # List of days of the week. Choices: monday, tuesday, wednesday, thursday, friday, saturday, sunday
+eseries_asup_start: 0 # Hour of the day(s) to start ASUP bundle transmissions. Start time must be less than end time. Choices: 0-23
+eseries_asup_end: 24 # Hour of the day(s) to end ASUP bundle transmissions. Start time must be less than end time. Choices: 1-24
+#eseries_asup_method: # ASUP delivery method. Choices: https, http, email (default: https)
+#eseries_asup_routing_type: # ASUP delivery routing type for https or http. Choices: direct, proxy, script (default: direct)
+#eseries_asup_proxy: # ASUP proxy delivery method information.
+# host: # ASUP proxy host IP address or FQDN. When eseries_asup_routing_type==proxy this must be specified.
+# port: # ASUP proxy host port. When eseries_asup_routing_type==proxy this must be specified.
+# script: # ASUP proxy host script.
+#eseries_asup_email: # ASUP email delivery configuration information
+# server: # ASUP email server
+# sender: # ASUP email sender
+# test_recipient: # ASUP configuration mail test recipient
+#eseries_maintenance_duration: # Duration in hours (1-72) the ASUP maintenance mode will be active
+#eseries_maintenance_emails: # List of email addresses for maintenance notifications
+#eseries_asup_validate: # Verify ASUP configuration prior to applying changes.
+
+# Audit-log configuration defaults
+# --------------------------------
+eseries_auditlog_enforce_policy: false # Whether to make audit-log policy changes. Choices: true, false
+eseries_auditlog_force: false # Forces audit-log to delete log messages when the fullness threshold has been exceeded.
+ # Applicable when eseries_auditlog_full_policy=preventSystemAccess. Choices: true, false
+eseries_auditlog_full_policy: overWrite # Policy for what to do when the record limit has been reached. Choices: overWrite, preventSystemAccess
+eseries_auditlog_log_level: writeOnly # Filters logs based on the specified level. Choices: all, writeOnly
+eseries_auditlog_max_records: 50000 # Maximum number of audit-log messages retained. Choices: 100-50000.
+eseries_auditlog_threshold: 90 # Memory full percentage threshold at which audit-log will start issuing warning messages. Choices: 60-90
+
+# Syslog configuration defaults
+# -----------------------------
+#eseries_syslog_state: # Whether syslog servers should be added or removed from storage system. Choices: present, absent
+#eseries_syslog_address: # Syslog server IPv4 address or fully qualified hostname.
+eseries_syslog_test: false # Whether a test message should be sent to the syslog server when added to the storage system. Choices: true, false
+eseries_syslog_protocol: udp # Protocol to be used when transmitting log messages to the syslog server. Choices: udp, tcp, tls
+eseries_syslog_port: 514 # Port to be used when transmitting log messages to the syslog server.
+eseries_syslog_components: ["auditLog"] # List of components to log to the syslog server. Choices: auditLog, (others may be available)
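All of the commented entries above stay inactive until the corresponding variable is supplied through the inventory; the uncommented entries are the role's effective defaults. A small, illustrative host_vars snippet that overrides a few of them:

    eseries_system_password: admin_password         # enables the admin-password tasks
    eseries_auditlog_enforce_policy: true            # turns on the audit-log configuration task
    eseries_syslog_protocol: tcp                     # overrides the role default of udp
    eseries_firmware_wait_for_completion: false      # overrides the role default of true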
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/meta/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/meta/main.yml
new file mode 100644
index 000000000..a519eeceb
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/meta/main.yml
@@ -0,0 +1,13 @@
+galaxy_info:
+ author: Nathan Swartz (@ndswartz)
+ description: Manages NetApp E-Series storage system's firmware, management interfaces, security, system, and logging configuration.
+ company: NetApp, Inc
+ license: BSD-3-Clause
+ platforms: []
+ min_ansible_version: 2.13
+ galaxy_tags:
+ - netapp
+ - eseries
+ - storage
+
+dependencies: [] \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/firmware.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/firmware.yml
new file mode 100644
index 000000000..d53d2568c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/firmware.yml
@@ -0,0 +1,83 @@
+- name: Upload required drive, nvsram, and firmware files to Web Services Proxy.
+ block:
+ - name: Collect expected firmware file lists
+ ansible.builtin.set_fact:
+ eseries_proxy_drive_firmware: |-
+ {%- set drive_firmware = [] %}
+ {%- for host in ansible_play_hosts_all %}
+ {%- if hostvars[host]["current_eseries_api_is_proxy"] and "eseries_drive_firmware_firmware_list" in hostvars[host] %}
+ {%- if drive_firmware.extend(hostvars[host]["eseries_drive_firmware_firmware_list"]) %}{%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ drive_firmware | list }}
+ eseries_proxy_nvsram: |-
+ {%- set nvsram = [] %}
+ {%- for host in ansible_play_hosts_all %}
+ {%- if hostvars[host]["current_eseries_api_is_proxy"] and "eseries_firmware_nvsram" in hostvars[host] %}
+ {%- if nvsram.append(hostvars[host]["eseries_firmware_nvsram"]) %}{%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ nvsram | list }}
+ eseries_proxy_firmware: |-
+ {%- set firmware = [] %}
+ {%- for host in ansible_play_hosts_all %}
+ {%- if hostvars[host]["current_eseries_api_is_proxy"] and "eseries_firmware_firmware" in hostvars[host] %}
+ {%- if firmware.append(hostvars[host]["eseries_firmware_firmware"]) %}{%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{ firmware | list }}
+
+ - name: Ensure SANtricity Web Services Proxy has the expected drive firmware
+ netapp_eseries.santricity.na_santricity_proxy_drive_firmware_upload:
+ api_url: "{{ eseries_proxy_api_url }}"
+ api_username: "{{ eseries_proxy_api_username }}"
+ api_password: "{{ eseries_proxy_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ firmware: "{{ eseries_proxy_drive_firmware | default(omit) }}"
+ connection: local
+ when: eseries_proxy_drive_firmware != []
+
+ - name: Ensure SANtricity Web Services Proxy has the expected controller firmware and NVSRAM
+ netapp_eseries.santricity.na_santricity_proxy_firmware_upload:
+ api_url: "{{ eseries_proxy_api_url }}"
+ api_username: "{{ eseries_proxy_api_username }}"
+ api_password: "{{ eseries_proxy_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ firmware: "{{ eseries_proxy_firmware | default(omit) + eseries_proxy_nvsram | default(omit) }}"
+ connection: local
+ when: eseries_proxy_nvsram != [] or eseries_proxy_firmware != []
+ run_once: true
+
+- name: Ensure drive firmware is properly configured
+ netapp_eseries.santricity.na_santricity_drive_firmware:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ firmware: "{{ eseries_drive_firmware_firmware_list }}"
+ wait_for_completion: "{{ eseries_drive_firmware_wait_for_completion | default(true) }}"
+ ignore_inaccessible_drives: "{{ eseries_drive_firmware_ignore_inaccessible_drives | default(omit) }}"
+ upgrade_drives_online: "{{ eseries_drive_firmware_upgrade_drives_online | default(omit) }}"
+ connection: local
+ when: eseries_drive_firmware_firmware_list is defined
+ tags:
+ - firmware
+ - drive_firmware
+
+- name: Ensure controller firmware is properly configured
+ netapp_eseries.santricity.na_santricity_firmware:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ nvsram: "{{ eseries_firmware_nvsram | default('') }}"
+ firmware: "{{ eseries_firmware_firmware | default('') }}"
+ wait_for_completion: "{{ eseries_firmware_wait_for_completion | default(true) }}"
+ clear_mel_events: "{{ eseries_firmware_ignore_mel_events | default(omit) }}"
+ connection: local
+ when: eseries_firmware_nvsram is defined or eseries_firmware_firmware is defined
+ tags:
+ - firmware
+ - controller_firmware
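The set_fact block at the top of this file relies on a common Jinja idiom: list.append() and list.extend() return a falsy value, so wrapping the call in an {% if %} that does nothing lets the loop accumulate entries without emitting output, and the final {{ ... | list }} line becomes the fact's value. A stripped-down sketch of the same idiom with illustrative variable names:

    - name: Collect one value from every play host (illustrative names)
      ansible.builtin.set_fact:
        all_firmware_files: |-
          {%- set files = [] %}
          {%- for host in ansible_play_hosts_all %}
          {%- if "my_firmware_file" in hostvars[host] %}
          {%- if files.append(hostvars[host]["my_firmware_file"]) %}{%- endif %}
          {%- endif %}
          {%- endfor %}
          {{ files | list }}
      run_once: true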
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/interface.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/interface.yml
new file mode 100644
index 000000000..2e8f535fe
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/interface.yml
@@ -0,0 +1,171 @@
+- name: Determine individual management interface information.
+ ansible.builtin.set_fact:
+ do_not_remove: 0 # Placeholder to prevent task from failing when no interfaces are defined.
+ interface_a1: "{{ eseries_management_interfaces['controller_a'][0] | default(omit) }}"
+ interface_a2: "{{ eseries_management_interfaces['controller_a'][1] | default(omit) }}"
+ interface_b1: "{{ eseries_management_interfaces['controller_b'][0] | default(omit) }}"
+ interface_b2: "{{ eseries_management_interfaces['controller_b'][1] | default(omit) }}"
+
+# This task is only executed when no controller A interfaces are defined so global interface options can still be set.
+- name: Ensure controller A DNS, NTP and SSH configuration is set.
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: A
+ dns_config_method: "{{ eseries_management_interfaces['dns_config_method'] |
+ default(eseries_management_dns_config_method | default(omit)) }}"
+ dns_address: "{{ eseries_management_interfaces['dns_address'] |
+ default(eseries_management_dns_address | default(omit)) }}"
+ dns_address_backup: "{{ eseries_management_interfaces['dns_address_backup'] |
+ default(eseries_management_dns_address_backup | default(omit)) }}"
+ ntp_config_method: "{{ eseries_management_interfaces['ntp_config_method'] |
+ default(eseries_management_ntp_config_method | default(omit)) }}"
+ ntp_address: "{{ eseries_management_interfaces['ntp_address'] |
+ default(eseries_management_ntp_address | default(omit)) }}"
+ ntp_address_backup: "{{ eseries_management_interfaces['ntp_address_backup'] |
+ default(eseries_management_ntp_address_backup | default(omit)) }}"
+ ssh: "{{ eseries_management_interfaces['ssh'] | default(eseries_management_ssh | default(omit)) }}"
+ when: interface_a1 is not defined and interface_a2 is not defined
+
+# This task is only executed when no controller B interfaces are defined so global interface options can still be set.
+- name: Ensure controller B DNS, NTP and SSH configuration is set.
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: B
+ dns_config_method: "{{ eseries_management_interfaces['dns_config_method'] |
+ default(eseries_management_dns_config_method | default(omit)) }}"
+ dns_address: "{{ eseries_management_interfaces['dns_address'] |
+ default(eseries_management_dns_address | default(omit)) }}"
+ dns_address_backup: "{{ eseries_management_interfaces['dns_address_backup'] |
+ default(eseries_management_dns_address_backup | default(omit)) }}"
+ ntp_config_method: "{{ eseries_management_interfaces['ntp_config_method'] |
+ default(eseries_management_ntp_config_method | default(omit)) }}"
+ ntp_address: "{{ eseries_management_interfaces['ntp_address'] |
+ default(eseries_management_ntp_address | default(omit)) }}"
+ ntp_address_backup: "{{ eseries_management_interfaces['ntp_address_backup'] |
+ default(eseries_management_ntp_address_backup | default(omit)) }}"
+ ssh: "{{ eseries_management_interfaces['ssh'] | default(eseries_management_ssh | default(omit)) }}"
+ when: interface_b1 is not defined and interface_b2 is not defined
+
+- name: Ensure the management interface (controller A, port 1) has been configured.
+ block:
+ - name: Ensure the management interface (controller A, port 1) has been configured.
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: A
+ port: 1
+ address: "{{ interface_a1['address'] | default(omit) }}"
+ config_method: "{{ interface_a1['config_method'] | default(eseries_management_interfaces['config_method'] | default(eseries_management_config_method | default(omit))) }}"
+ subnet_mask: "{{ interface_a1['subnet_mask'] | default(eseries_management_interfaces['subnet_mask'] | default(eseries_management_subnet_mask | default(omit))) }}"
+ gateway: "{{ interface_a1['gateway'] | default(eseries_management_interfaces['gateway'] | default(eseries_management_gateway | default(omit))) }}"
+ dns_config_method: "{{ interface_a1['dns_config_method'] | default(eseries_management_interfaces['dns_config_method'] | default(eseries_management_dns_config_method | default(omit))) }}"
+ dns_address: "{{ interface_a1['dns_address'] | default(eseries_management_interfaces['dns_address'] | default(eseries_management_dns_address | default(omit))) }}"
+ dns_address_backup: "{{ interface_a1['dns_address_backup'] | default(eseries_management_interfaces['dns_address_backup'] | default(eseries_management_dns_address_backup | default(omit))) }}"
+ ntp_config_method: "{{ interface_a1['ntp_config_method'] | default(eseries_management_interfaces['ntp_config_method'] | default(eseries_management_ntp_config_method | default(omit))) }}"
+ ntp_address: "{{ interface_a1['ntp_address'] | default(eseries_management_interfaces['ntp_address'] | default(eseries_management_ntp_address | default(omit))) }}"
+ ntp_address_backup: "{{ interface_a1['ntp_address_backup'] | default(eseries_management_interfaces['ntp_address_backup'] | default(eseries_management_ntp_address_backup | default(omit))) }}"
+ ssh: "{{ interface_a1['ssh'] | default(eseries_management_interfaces['ssh'] | default(eseries_management_ssh | default(omit))) }}"
+ connection: local
+ register: current_management_urls_a1
+ - name: Update Web Services URL
+ ansible.builtin.set_fact:
+ current_eseries_api_url: "{{ current_management_urls_a1['available_embedded_api_urls'][0] | default(current_eseries_api_url) }}"
+ when: interface_a1 is defined and interface_a1
+
+- name: Ensure the management interface (controller A, port 2) has been configured.
+ block:
+ - name: Ensure the management interface (controller A, port 2) has been configured.
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: A
+ port: 2
+ address: "{{ interface_a2['address'] | default(omit) }}"
+ config_method: "{{ interface_a2['config_method'] | default(eseries_management_interfaces['config_method'] | default(eseries_management_config_method | default(omit))) }}"
+ subnet_mask: "{{ interface_a2['subnet_mask'] | default(eseries_management_interfaces['subnet_mask'] | default(eseries_management_subnet_mask | default(omit))) }}"
+ gateway: "{{ interface_a2['gateway'] | default(eseries_management_interfaces['gateway'] | default(eseries_management_gateway | default(omit))) }}"
+ dns_config_method: "{{ interface_a2['dns_config_method'] | default(eseries_management_interfaces['dns_config_method'] | default(eseries_management_dns_config_method | default(omit))) }}"
+ dns_address: "{{ interface_a2['dns_address'] | default(eseries_management_interfaces['dns_address'] | default(eseries_management_dns_address | default(omit))) }}"
+ dns_address_backup: "{{ interface_a2['dns_address_backup'] | default(eseries_management_interfaces['dns_address_backup'] | default(eseries_management_dns_address_backup | default(omit))) }}"
+ ntp_config_method: "{{ interface_a2['ntp_config_method'] | default(eseries_management_interfaces['ntp_config_method'] | default(eseries_management_ntp_config_method | default(omit))) }}"
+ ntp_address: "{{ interface_a2['ntp_address'] | default(eseries_management_interfaces['ntp_address'] | default(eseries_management_ntp_address | default(omit))) }}"
+ ntp_address_backup: "{{ interface_a2['ntp_address_backup'] | default(eseries_management_interfaces['ntp_address_backup'] | default(eseries_management_ntp_address_backup | default(omit))) }}"
+ ssh: "{{ interface_a2['ssh'] | default(eseries_management_interfaces['ssh'] | default(eseries_management_ssh | default(omit))) }}"
+ connection: local
+ register: current_management_urls_a2
+ - name: Try backup Web Services REST API url.
+ ansible.builtin.set_fact:
+ current_eseries_api_url: "{{ current_management_urls_a2['available_embedded_api_urls'][0] | default(current_eseries_api_url) }}"
+ when: interface_a2 is defined and interface_a2
+
+- name: Ensure the management interface (controller B, port 1) has been configured.
+ block:
+ - name: Ensure the management interface (controller B, port 1) has been configured.
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: B
+ port: 1
+ address: "{{ interface_b1['address'] | default(omit) }}"
+ config_method: "{{ interface_b1['config_method'] | default(eseries_management_interfaces['config_method'] | default(eseries_management_config_method | default(omit))) }}"
+ subnet_mask: "{{ interface_b1['subnet_mask'] | default(eseries_management_interfaces['subnet_mask'] | default(eseries_management_subnet_mask | default(omit))) }}"
+ gateway: "{{ interface_b1['gateway'] | default(eseries_management_interfaces['gateway'] | default(eseries_management_gateway | default(omit))) }}"
+ dns_config_method: "{{ interface_b1['dns_config_method'] | default(eseries_management_interfaces['dns_config_method'] | default(eseries_management_dns_config_method | default(omit))) }}"
+ dns_address: "{{ interface_b1['dns_address'] | default(eseries_management_interfaces['dns_address'] | default(eseries_management_dns_address | default(omit))) }}"
+ dns_address_backup: "{{ interface_b1['dns_address_backup'] | default(eseries_management_interfaces['dns_address_backup'] | default(eseries_management_dns_address_backup | default(omit))) }}"
+ ntp_config_method: "{{ interface_b1['ntp_config_method'] | default(eseries_management_interfaces['ntp_config_method'] | default(eseries_management_ntp_config_method | default(omit))) }}"
+ ntp_address: "{{ interface_b1['ntp_address'] | default(eseries_management_interfaces['ntp_address'] | default(eseries_management_ntp_address | default(omit))) }}"
+ ntp_address_backup: "{{ interface_b1['ntp_address_backup'] | default(eseries_management_interfaces['ntp_address_backup'] | default(eseries_management_ntp_address_backup | default(omit))) }}"
+ ssh: "{{ interface_b1['ssh'] | default(eseries_management_interfaces['ssh'] | default(eseries_management_ssh | default(omit))) }}"
+ connection: local
+ register: current_management_urls_b1
+ - name: Try backup Web Services REST API url.
+ ansible.builtin.set_fact:
+ current_eseries_api_url: "{{ current_management_urls_b1['available_embedded_api_urls'][0] | default(current_eseries_api_url) }}"
+ when: interface_b1 is defined and interface_b1
+
+- name: Ensure the management interface (controller B, port 2) has been configured.
+ block:
+ - name: Ensure the management interface (controller B, port 2) has been configured.
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: B
+ port: 2
+ address: "{{ interface_b2['address'] | default(omit) }}"
+ config_method: "{{ interface_b2['config_method'] | default(eseries_management_interfaces['config_method'] | default(eseries_management_config_method | default(omit))) }}"
+ subnet_mask: "{{ interface_b2['subnet_mask'] | default(eseries_management_interfaces['subnet_mask'] | default(eseries_management_subnet_mask | default(omit))) }}"
+ gateway: "{{ interface_b2['gateway'] | default(eseries_management_interfaces['gateway'] | default(eseries_management_gateway | default(omit))) }}"
+ dns_config_method: "{{ interface_b2['dns_config_method'] | default(eseries_management_interfaces['dns_config_method'] | default(eseries_management_dns_config_method | default(omit))) }}"
+ dns_address: "{{ interface_b2['dns_address'] | default(eseries_management_interfaces['dns_address'] | default(eseries_management_dns_address | default(omit))) }}"
+ dns_address_backup: "{{ interface_b2['dns_address_backup'] | default(eseries_management_interfaces['dns_address_backup'] | default(eseries_management_dns_address_backup | default(omit))) }}"
+ ntp_config_method: "{{ interface_b2['ntp_config_method'] | default(eseries_management_interfaces['ntp_config_method'] | default(eseries_management_ntp_config_method | default(omit))) }}"
+ ntp_address: "{{ interface_b2['ntp_address'] | default(eseries_management_interfaces['ntp_address'] | default(eseries_management_ntp_address | default(omit))) }}"
+ ntp_address_backup: "{{ interface_b2['ntp_address_backup'] | default(eseries_management_interfaces['ntp_address_backup'] | default(eseries_management_ntp_address_backup | default(omit))) }}"
+ ssh: "{{ interface_b2['ssh'] | default(eseries_management_interfaces['ssh'] | default(eseries_management_ssh | default(omit))) }}"
+ connection: local
+ register: current_management_urls_b2
+ - name: Try backup Web Services REST API url.
+ ansible.builtin.set_fact:
+ current_eseries_api_url: "{{ current_management_urls_b2['available_embedded_api_urls'][0] | default(current_eseries_api_url) }}"
+ when: interface_b2 is defined and interface_b2
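Each port task above follows the same register-then-repoint pattern: the na_santricity_mgmt_interface result may include available_embedded_api_urls when the management address changes, and the follow-up set_fact keeps the old URL only if no new one was reported. A condensed sketch of that pattern (option values are illustrative):

    - name: Reconfigure a management port
      netapp_eseries.santricity.na_santricity_mgmt_interface:
        ssid: "{{ current_eseries_ssid }}"
        api_url: "{{ current_eseries_api_url }}"
        api_username: "{{ current_eseries_api_username }}"
        api_password: "{{ current_eseries_api_password }}"
        controller: A
        port: 1
        config_method: dhcp
      connection: local
      register: mgmt_result

    - name: Follow the system to its new embedded API URL when one is reported
      ansible.builtin.set_fact:
        current_eseries_api_url: "{{ mgmt_result['available_embedded_api_urls'][0] | default(current_eseries_api_url) }}"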
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/logging.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/logging.yml
new file mode 100644
index 000000000..852f22793
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/logging.yml
@@ -0,0 +1,95 @@
+- name: Ensure ASUP configuration
+ netapp_eseries.santricity.na_santricity_asup:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_asup_state }}"
+ active: "{{ eseries_asup_active | default(omit) }}"
+ days: "{{ eseries_asup_days | default(omit) }}"
+ start: "{{ eseries_asup_start | default(omit) }}"
+ end: "{{ eseries_asup_end | default(omit) }}"
+ method: "{{ eseries_asup_method | default(omit) }}"
+ routing_type: "{{ eseries_asup_routing_type | default(omit) }}"
+ proxy: "{{ eseries_asup_proxy | default(omit) }}"
+ email: "{{ eseries_asup_email | default(omit) }}"
+ maintenance_duration: "{{ eseries_maintenance_duration | default(omit) }}"
+ maintenance_emails: "{{ eseries_maintenance_emails | default(omit) }}"
+ validate: "{{ eseries_asup_validate | default(omit) }}"
+ connection: local
+ when: eseries_asup_state is defined
+ tags:
+ - logging
+ - asup
+
+- name: Ensure alerts have been configured
+ netapp_eseries.santricity.na_santricity_alerts:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_alerts_state }}"
+ contact: "{{ eseries_alerts_contact | default(omit) }}"
+ recipients: "{{ eseries_alerts_recipients | default(omit) }}"
+ sender: "{{ eseries_alerts_sender | default(omit) }}"
+ server: "{{ eseries_alerts_server | default(omit) }}"
+ test: "{{ eseries_alerts_test | default(omit) }}"
+ connection: local
+ when: eseries_alerts_state is defined
+ tags:
+ - logging
+ - alerts
+
+- name: Ensure auditLog configuration
+ netapp_eseries.santricity.na_santricity_auditlog:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ force: "{{ eseries_auditlog_force | default(omit) }}"
+ full_policy: "{{ eseries_auditlog_full_policy | default(omit) }}"
+ log_level: "{{ eseries_auditlog_log_level | default(omit) }}"
+ max_records: "{{ eseries_auditlog_max_records | default(omit) }}"
+ threshold: "{{ eseries_auditlog_threshold | default(omit) }}"
+ connection: local
+ when: eseries_auditlog_enforce_policy
+ tags:
+ - logging
+ - auditlog
+
+- name: Ensure components are configured to be sent to the appropriate syslog servers
+ netapp_eseries.santricity.na_santricity_syslog:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_syslog_state }}"
+ address: "{{ eseries_syslog_address }}"
+ test: "{{ eseries_syslog_test | default(omit) }}"
+ protocol: "{{ eseries_syslog_protocol | default(omit) }}"
+ port: "{{ eseries_syslog_port | default(omit) }}"
+ components: "{{ eseries_syslog_components | default(omit) }}"
+ connection: local
+ when: eseries_syslog_state is defined and eseries_syslog_address is defined
+ tags:
+ - logging
+ - syslog
+
+- name: Ensure alerts are configured to be sent to the appropriate syslog servers
+ netapp_eseries.santricity.na_santricity_alerts_syslog:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ eseries_validate_certs | default(omit) }}"
+ servers: "{{ eseries_alert_syslog_servers | default(omit) }}"
+ test: "{{ eseries_alert_syslog_test | default(omit) }}"
+ connection: local
+ when: eseries_alert_syslog_servers is defined
+ tags:
+ - logging
+ - syslog
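
For reference, a minimal, hypothetical set of inventory variables that would drive the logging tasks above might look like the sketch below. All server names, addresses, and recipients are placeholders, the state values are illustrative, and only the variables that are actually defined cause their corresponding task to run:

    eseries_asup_state: enabled
    eseries_alerts_state: enabled
    eseries_alerts_server: mail.example.com
    eseries_alerts_sender: storage-alerts@example.com
    eseries_alerts_recipients:
      - oncall@example.com
    eseries_syslog_state: present
    eseries_syslog_address: 192.168.1.50
    eseries_alert_syslog_servers:
      - address: 192.168.1.50
        port: 514
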
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/main.yml
new file mode 100644
index 000000000..f2b2bbe6e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/main.yml
@@ -0,0 +1,27 @@
+- name: Set current storage system credentials
+ ansible.builtin.include_role:
+ name: netapp_eseries.santricity.nar_santricity_common
+ tasks_from: build_info.yml
+ when: current_eseries_api_url is not defined
+ tags:
+ - always
+
+- name: Ensure security settings are configured
+ ansible.builtin.import_tasks: security.yml
+
+- name: Ensure management interfaces are configured
+ ansible.builtin.import_tasks: interface.yml
+ tags:
+ - interface
+ - ntp
+ - dns
+ - ssh
+
+- name: Ensure all global system settings are configured
+ ansible.builtin.import_tasks: system.yml
+
+- name: Ensure event logging has been configured
+ ansible.builtin.import_tasks: logging.yml
+
+- name: Ensure drive and controller firmware are correct
+ ansible.builtin.import_tasks: firmware.yml
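
A playbook that applies the whole role could be as small as the hedged sketch below; the group name is an assumption, and the API credentials are expected to come from host or group variables consumed by nar_santricity_common:

    - hosts: eseries_storage_systems
      gather_facts: false
      collections:
        - netapp_eseries.santricity
      tasks:
        - name: Configure E-Series management settings
          ansible.builtin.import_role:
            name: nar_santricity_management
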
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/security.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/security.yml
new file mode 100644
index 000000000..d176b1922
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/security.yml
@@ -0,0 +1,213 @@
+- name: Ensure admin password is set and is correct
+ netapp_eseries.santricity.na_santricity_auth:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: |-
+ {%- if current_eseries_api_is_proxy == True -%}
+ {{- current_eseries_api_password -}}
+ {%- else -%}
+ {{- eseries_system_old_password | default(current_eseries_api_password) -}}
+ {%- endif -%}
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ current_admin_password: |-
+ {%- if current_eseries_api_is_proxy == True -%}
+ {{- eseries_system_old_password | default(eseries_system_password) -}}
+ {%- else -%}
+ {{- omit -}}
+ {%- endif -%}
+ user: admin
+ password: "{{ eseries_system_password }}"
+ connection: local
+ register: admin_password
+ when: eseries_system_password is defined
+ tags:
+ - always
+
+- name: Update current_eseries_api_password if storage system password changed.
+ block:
+ - name: Update current_eseries_api_password.
+ ansible.builtin.set_fact:
+ current_eseries_api_password: |-
+ {%- if current_eseries_api_is_proxy == True -%}
+ {{- current_eseries_api_password -}}
+ {%- else -%}
+ {{- eseries_system_password -}}
+ {%- endif -%}
+ no_log: true
+ - name: Wait for password to update
+ ansible.builtin.pause:
+ seconds: 5
+ when: admin_password['changed'] == True
+
+- name: Ensure non-admin passwords have been set
+ netapp_eseries.santricity.na_santricity_auth:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ current_admin_password: |-
+ {%- if current_eseries_api_is_proxy == True -%}
+ {{- eseries_system_password -}}
+ {%- else -%}
+ {{- omit -}}
+ {%- endif -%}
+ user: "{{ item['key'] }}"
+ password: "{{ item['value'] }}"
+ connection: local
+ loop: "{{ lookup('dict', non_admin_user_authentication, wantlist=True) }}"
+ vars:
+ non_admin_user_authentication: |-
+ {%- set non_admin_list = {} %}
+ {%- if eseries_system_monitor_password is defined and eseries_system_monitor_password and non_admin_list.update({"monitor": eseries_system_monitor_password})%}{%- endif %}
+ {%- if eseries_system_security_password is defined and eseries_system_security_password and non_admin_list.update({"security": eseries_system_security_password})%}{%- endif %}
+ {%- if eseries_system_storage_password is defined and eseries_system_storage_password and non_admin_list.update({"storage": eseries_system_storage_password})%}{%- endif %}
+ {%- if eseries_system_support_password is defined and eseries_system_support_password and non_admin_list.update({"support": eseries_system_support_password})%}{%- endif %}
+ {{ non_admin_list }}
+
+- name: Ensure client certificates are installed
+ netapp_eseries.santricity.na_santricity_client_certificate:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ certificates: "{{ certificates }}"
+ remove_unspecified_user_certificates: "{{ eseries_client_certificate_remove_unspecified_user_certificates | default(omit) }}"
+ connection: local
+ when: eseries_client_certificate_certificates is defined or eseries_client_certificate_common_certificates is defined
+ tags:
+ - security
+ - certificates
+ vars:
+ certificates: |-
+ {%- set certs = [] -%}
+
+ {#- Add common client certificates -#}
+ {%- if eseries_client_certificate_common_certificates is defined -%}
+ {%- if eseries_client_certificate_common_certificates is string -%}
+ {%- if certs.append(eseries_client_certificate_common_certificates) -%}{%- endif -%}
+ {%- elif eseries_client_certificate_common_certificates is iterable -%}
+ {%- if certs.extend(eseries_client_certificate_common_certificates) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {#- Add controller A client certificates -#}
+ {%- if eseries_client_certificate_certificates is defined -%}
+ {%- if eseries_client_certificate_certificates is string -%}
+ {%- if eseries_client_certificate_certificates not in certs -%}
+ {%- if certs.append(eseries_client_certificate_certificates) -%}{%- endif -%}
+ {%- endif -%}
+ {%- elif eseries_client_certificate_certificates is iterable -%}
+ {%- for client_cert in eseries_client_certificate_certificates if client_cert not in certs -%}
+ {%- if certs.append(client_cert) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endif -%}
+ {{- certs -}}
+
+- name: Ensure controller A server certificates are installed
+ netapp_eseries.santricity.na_santricity_server_certificate:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: "A"
+ certificates: "{{ certificates }}"
+ passphrase: "{{ eseries_server_certificate['controller_a']['passphrase'] | default(eseries_server_certificate_common_passphrase | default(omit)) }}"
+ connection: local
+ when: eseries_server_certificate_common_certificates is defined or eseries_server_certificate['controller_a'] is defined or eseries_server_certificate['controller_a']['certificates'] is defined
+ tags:
+ - security
+ - certificates
+ vars:
+ certificates: |-
+ {%- set certs = [] -%}
+
+ {#- Add common server certificates -#}
+ {%- if eseries_server_certificate_common_certificates is defined -%}
+ {%- if eseries_server_certificate_common_certificates is string -%}
+ {%- if certs.append(eseries_server_certificate_common_certificates) -%}{%- endif -%}
+ {%- elif eseries_server_certificate_common_certificates is iterable -%}
+ {%- if certs.extend(eseries_server_certificate_common_certificates) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {#- Add controller A certificates -#}
+ {%- if eseries_server_certificate is defined and eseries_server_certificate["controller_a"] is defined or eseries_server_certificate["controller_a"]["certificates"] is defined -%}
+ {%- if eseries_server_certificate["controller_a"]["certificates"] is string -%}
+ {%- if eseries_server_certificate["controller_a"]["certificates"] not in certs -%}
+ {%- if certs.append(eseries_server_certificate["controller_a"]["certificates"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- elif eseries_server_certificate["controller_a"]["certificates"] is iterable -%}
+ {%- for server_cert in eseries_server_certificate["controller_a"]["certificates"] if server_cert not in certs -%}
+ {%- if certs.append(server_cert) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endif -%}
+ {{- certs -}}
+
+- name: Ensure controller B server certificates are installed
+ netapp_eseries.santricity.na_santricity_server_certificate:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ controller: "B"
+ certificates: "{{ certificates }}"
+ passphrase: "{{ eseries_server_certificate['controller_b']['passphrase'] | default(eseries_server_certificate_common_passphrase | default(omit)) }}"
+ connection: local
+ when: eseries_server_certificate_common_certificates is defined or eseries_server_certificate['controller_b'] is defined or eseries_server_certificate['controller_b']['certificates'] is defined
+ tags:
+ - security
+ - certificates
+ vars:
+ certificates: |-
+ {%- set certs = [] -%}
+
+ {#- Add common server certificates -#}
+ {%- if eseries_server_certificate_common_certificates is defined -%}
+ {%- if eseries_server_certificate_common_certificates is string -%}
+ {%- if certs.append(eseries_server_certificate_common_certificates) -%}{%- endif -%}
+ {%- elif eseries_server_certificate_common_certificates is iterable -%}
+ {%- if certs.extend(eseries_server_certificate_common_certificates) -%}{%- endif -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {#- Add controller B certificates -#}
+ {%- if eseries_server_certificate is defined and eseries_server_certificate["controller_b"] is defined or eseries_server_certificate["controller_b"]["certificates"] is defined -%}
+ {%- if eseries_server_certificate["controller_b"]["certificates"] is string -%}
+ {%- if eseries_server_certificate["controller_b"]["certificates"] not in certs -%}
+ {%- if certs.append(eseries_server_certificate["controller_b"]["certificates"]) -%}{%- endif -%}
+ {%- endif -%}
+ {%- elif eseries_server_certificate["controller_b"]["certificates"] is iterable -%}
+ {%- for server_cert in eseries_server_certificate["controller_b"]["certificates"] if server_cert not in certs -%}
+ {%- if certs.append(server_cert) -%}{%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- endif -%}
+ {{- certs -}}
+
+- name: Ensure LDAP has been configured
+ netapp_eseries.santricity.na_santricity_ldap:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ state: "{{ eseries_ldap_state }}"
+ identifier: "{{ eseries_ldap_identifier | default(omit) }}"
+ server_url: "{{ eseries_ldap_server | default(omit) }}"
+ bind_user: "{{ eseries_ldap_bind_username | default(omit) }}"
+ bind_password: "{{ eseries_ldap_bind_password | default(omit) }}"
+ search_base: "{{ eseries_ldap_search_base | default(omit) }}"
+ user_attribute: "{{ eseries_ldap_user_attribute | default(omit) }}"
+ role_mappings: "{{ eseries_ldap_role_mappings | default(omit) }}"
+ connection: local
+ when: eseries_ldap_state is defined
+ tags:
+ - security
+ - ldap
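
The certificate and LDAP tasks above expect variables shaped roughly like the hypothetical sketch below; every path, URL, and mapping here is a placeholder, and the nested controller_a/controller_b layout mirrors how the Jinja expressions above index eseries_server_certificate:

    eseries_system_password: "{{ vault_admin_password }}"
    eseries_client_certificate_certificates:
      - /path/to/client_ca.crt
    eseries_server_certificate:
      controller_a:
        certificates:
          - /path/to/controller_a_bundle.pem
        passphrase: "{{ vault_cert_passphrase }}"
      controller_b:
        certificates:
          - /path/to/controller_b_bundle.pem
    eseries_ldap_state: present
    eseries_ldap_server: ldap://ldap.example.com:389
    eseries_ldap_search_base: OU=users,DC=example,DC=com
    eseries_ldap_role_mappings:
      ".*":
        - storage.monitor
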
diff --git a/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/system.yml b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/system.yml
new file mode 100644
index 000000000..8bcf87cda
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/roles/nar_santricity_management/tasks/system.yml
@@ -0,0 +1,26 @@
+- name: Ensure storage array has the correct array globals
+ netapp_eseries.santricity.na_santricity_global:
+ ssid: "{{ current_eseries_ssid }}"
+ api_url: "{{ current_eseries_api_url }}"
+ api_username: "{{ current_eseries_api_username }}"
+ api_password: "{{ current_eseries_api_password }}"
+ validate_certs: "{{ current_eseries_validate_certs | default(omit) }}"
+ name: "{{ eseries_system_name | default(omit) }}"
+ cache_block_size: "{{ eseries_system_cache_block_size | default(omit) }}"
+ cache_flush_threshold: "{{ eseries_system_cache_flush_threshold | default(omit) }}"
+ automatic_load_balancing: "{{ eseries_system_autoload_balance | default(omit) }}"
+ host_connectivity_reporting: "{{ eseries_system_host_connectivity_reporting | default(omit) }}"
+ default_host_type: "{{ eseries_system_default_host_type | default(omit) }}"
+ login_banner_message: "{{ eseries_system_login_banner_message | default(omit) }}"
+ controller_shelf_id: "{{ eseries_system_controller_shelf_id | default(omit) }}"
+ connection: local
+ when: "eseries_system_name is defined or
+ eseries_system_cache_block_size is defined or
+ eseries_system_cache_flush_threshold is defined or
+ eseries_system_autoload_balance is defined or
+ eseries_system_host_connectivity_reporting is defined or
+ eseries_system_default_host_type is defined or
+ eseries_system_login_banner_message is defined or
+ eseries_system_controller_shelf_id is defined"
+ tags:
+ - system
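
Because the task above only runs when at least one of its variables is defined, a hypothetical override can stay very small; the values below are illustrative only:

    eseries_system_name: array01
    eseries_system_default_host_type: Linux dm-mp
    eseries_system_login_banner_message: Authorized use only.
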
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/integration_config.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/integration_config.yml
new file mode 100644
index 000000000..8292ee426
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/integration_config.yml
@@ -0,0 +1,32 @@
+# url and credentials - all santricity modules will use this information
+ssid: "1"
+base_url: https://192.168.1.100:8443/devmgr/v2/
+username: admin
+password: adminPass
+validate_cert: false
+
+# proxy url and credentials - modules that require special api testing will use this information
+proxy_ssid: "10"
+proxy_legacy_ssid: "20"
+proxy_base_url: https://192.168.1.200:8443/devmgr/v2/
+proxy_username: admin
+proxy_password: ""
+proxy_validate_cert: false
+
+# na_santricity_auth module variable requirements in addition to both embedded and proxy credentials
+expected_serial_with_proxy_legacy: "711214012345"
+expected_serial_with_proxy_embedded: "021633012345"
+expected_serial_without_proxy: "021628012345"
+proxy_discover_subnet: 192.168.1.0/24
+systems:
+ - ssid: 10 # should match proxy_ssid above
+ addresses: ["192.168.1.110"]
+ - ssid: 20 # should match proxy_legacy_ssid above
+ addresses: ["192.168.1.120"]
+
+
+# na_santricity_ldap module variable requirements
+#bind_user: "CN=bind_user,OU=accounts,DC=test,DC=example,DC=com"
+#bind_password: "bind_password"
+#server_url: "ldap://test.example.com:389"
+#search_base: "OU=users,DC=test,DC=example,DC=com"
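
Each integration target below consumes these values through a shared set of credential facts; the pattern stores them once under a YAML anchor and then merges them into each module call, as in this short sketch (the facts task is taken from the targets below; the facts-module call is just an example of reusing the anchor):

    - name: Set credential facts
      set_fact:
        credentials: &creds
          ssid: "{{ ssid }}"
          api_url: "{{ base_url }}"
          api_username: "{{ username }}"
          api_password: "{{ password }}"
          validate_certs: "{{ validate_cert }}"

    - name: Reuse the anchored credentials in a module call
      na_santricity_facts:
        <<: *creds
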
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts/tasks/main.yml
new file mode 100644
index 000000000..a5463ea84
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts/tasks/main.yml
@@ -0,0 +1,117 @@
+# Test code for the na_santricity_alerts module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+- name: Set credential facts for the na_santricity_alerts module's integration test
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+- name: Disable alerts
+ na_santricity_alerts:
+ <<: *creds
+ state: disabled
+- name: Get the current device alerts
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/device-alerts"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ register: current_state
+- name: Determine whether the current state is expected
+ assert:
+ that: "{{ not current_state['json']['alertingEnabled'] }}"
+ msg: "Failed to disable alerts!"
+
+- name: Set the initial alerting settings (changed, check_mode)
+ na_santricity_alerts:
+ <<: *creds
+ state: enabled
+ server: mail.example.com
+ sender: noreply@example.com
+ recipients:
+ - noreply@example.com
+ register: result
+ check_mode: true
+- name: Get the current device alerts
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/device-alerts"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ register: current_state
+- name: Determine whether the current state is expected
+ assert:
+ that: "{{ result['changed'] and not current_state['json']['alertingEnabled'] }}"
+ msg: "Failed to disable alerts!"
+
+- name: Set the initial alerting settings (changed)
+ na_santricity_alerts:
+ <<: *creds
+ state: enabled
+ server: mail.example.com
+ sender: noreply@example.com
+ recipients:
+ - noreply@example.com
+ register: result
+- name: Get the current device alerts
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/device-alerts"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ register: current_state
+- name: Determine whether the current state is expected
+ assert:
+ that: "{{ result['changed'] and current_state['json']['alertingEnabled'] and
+ current_state['json']['emailServerAddress'] == 'mail.example.com' and
+ current_state['json']['emailSenderAddress'] == 'noreply@example.com' and
+ current_state['json']['recipientEmailAddresses'] == ['noreply@example.com'] }}"
+ msg: "Failed to enable alerts!"
+
+- name: Set to different alerting settings (changed)
+ na_santricity_alerts:
+ <<: *creds
+ state: enabled
+ server: mail2.example.com
+ sender: noreply2@example.com
+ recipients:
+ - noreply@example.com
+ - noreply2@example.com
+ register: result
+- name: Get the current device alerts
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/device-alerts"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ register: current_state
+- name: Determine whether the current state is expected
+ assert:
+ that: "{{ result['changed'] and current_state['json']['alertingEnabled'] and
+ current_state['json']['emailServerAddress'] == 'mail2.example.com' and
+ current_state['json']['emailSenderAddress'] == 'noreply2@example.com' and
+ (current_state['json']['recipientEmailAddresses'] == ['noreply@example.com', 'noreply2@example.com'] or
+ current_state['json']['recipientEmailAddresses'] == ['noreply2@example.com', 'noreply@example.com']) }}"
+ msg: "Failed to enable alerts!"
+
+- name: Disable alerts again (changed)
+ na_santricity_alerts:
+ <<: *creds
+ state: disabled
+ register: result
+- name: Get the current device alerts
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/device-alerts"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ register: current_state
+- name: Determine whether the current state is expected
+ assert:
+ that: "{{ result['changed'] and not current_state['json']['alertingEnabled'] }}"
+ msg: "Failed to disable alerts!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts_syslog/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts_syslog/tasks/main.yml
new file mode 100644
index 000000000..34de206e8
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_alerts_syslog/tasks/main.yml
@@ -0,0 +1,112 @@
+# Test code for the na_santricity_alerts_syslog module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+- name: Set facts for na_santricity_alerts_syslog module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+- name: Delete all alert syslog servers
+ na_santricity_alerts_syslog:
+ <<: *creds
+
+- name: Add alert syslog servers (change, check_mode)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ servers:
+ - address: "192.168.1.100"
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Add alert syslog servers (change)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ servers:
+ - address: "192.168.1.100"
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Add alert syslog servers (no change)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ test: true
+ servers:
+ - address: "192.168.1.100"
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Remove one alert syslog server (change)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ test: true
+ servers:
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add one alert syslog server (change)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ test: true
+ servers:
+ - address: "192.168.1.100"
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete all alert syslog servers (change)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete all alert syslog servers (no change)
+ na_santricity_alerts_syslog:
+ <<: *creds
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_asup/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_asup/tasks/main.yml
new file mode 100644
index 000000000..fd66149f6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_asup/tasks/main.yml
@@ -0,0 +1,287 @@
+# Test code for the na_santricity_asup module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set credential facts
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+- name: Enable auto-support using default values
+ na_santricity_asup:
+ <<: *creds
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected default state
+ assert:
+ that: "{{ current.json.asupEnabled and
+ current.json.onDemandEnabled and
+ current.json.remoteDiagsEnabled and
+ current.json.schedule.dailyMinTime == 0 and
+ current.json.schedule.dailyMaxTime == 1439 }}"
+ msg: "Unexpected auto-support state"
+- name: Validate auto-support schedule
+ assert:
+ that: "{{ item in current.json.schedule.daysOfWeek }}"
+ msg: "{{ item }} is missing from the schedule"
+ loop: "{{ lookup('list', ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']) }}"
+
+- name: Disable auto-support
+ na_santricity_asup:
+ <<: *creds
+ state: disabled
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support is disabled
+ assert:
+ that: "{{ not current.json.asupEnabled }}"
+ msg: "Auto-support failed to be disabled"
+
+- name: Enable auto-support using specific values
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ active: true
+ start: 22
+ end: 24
+ days:
+ - friday
+ - saturday
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current.json.asupEnabled and
+ current.json.onDemandEnabled and
+ current.json.remoteDiagsEnabled and
+ current.json.schedule.dailyMinTime == (22 * 60) and
+ current.json.schedule.dailyMaxTime == (24 * 60 - 1) }}"
+ msg: "Unexpected auto-support state"
+- name: Validate auto-support schedule
+ assert:
+ that: "{{ item in current.json.schedule.daysOfWeek }}"
+ msg: "{{ item }} is missing from the schedule"
+ loop: "{{ lookup('list', ['friday', 'saturday']) }}"
+
+- name: Adjust auto-support schedule
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ active: true
+ start: 0
+ end: 5
+ days:
+ - monday
+ - thursday
+ - sunday
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current.json.asupEnabled and
+ current.json.onDemandEnabled and
+ current.json.remoteDiagsEnabled and
+ current.json.schedule.dailyMinTime == (0 * 60) and
+ current.json.schedule.dailyMaxTime == (5 * 60) }}"
+ msg: "Unexpected auto-support state"
+- name: Validate auto-support schedule
+ assert:
+ that: "{{ item in current.json.schedule.daysOfWeek }}"
+ msg: "{{ item }} is missing from the schedule"
+ loop: "{{ lookup('list', ['monday', 'thursday', 'sunday']) }}"
+
+- name: Repeat auto-support schedule change to verify idempotency
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ active: true
+ start: 0
+ end: 5
+ days:
+ - monday
+ - thursday
+ - sunday
+ register: result
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current.json.asupEnabled and
+ current.json.onDemandEnabled and
+ current.json.remoteDiagsEnabled and
+ current.json.schedule.dailyMinTime == (0 * 60) and
+ current.json.schedule.dailyMaxTime == (5 * 60) }}"
+ msg: "Unexpected auto-support state"
+- name: Validate auto-support schedule
+ assert:
+ that: "{{ item in current.json.schedule.daysOfWeek }}"
+ msg: "{{ item }} is missing from the schedule"
+ loop: "{{ lookup('list', ['monday', 'thursday', 'sunday']) }}"
+- name: Validate change was not detected
+ assert:
+ that: "{{ not result.changed }}"
+ msg: "Invalid change was detected"
+
+- name: Set auto-support schedule inactive (disables on-demand and remote diagnostics)
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ active: false
+ start: 0
+ end: 5
+ days:
+ - monday
+ - thursday
+ - sunday
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current.json.asupEnabled and not current.json.onDemandEnabled and not current.json.remoteDiagsEnabled }}"
+ msg: "Unexpected auto-support state"
+
+- name: Set auto-support http direct delivery method
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ method: http
+ routing_type: direct
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current['json']['delivery']['method'] == 'http' }}"
+ msg: "Delievery method should be http!"
+
+- name: Set auto-support https direct delivery method
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ method: https
+ routing_type: direct
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current['json']['delivery']['method'] == 'https' }}"
+ msg: "Delievery method should be https!"
+
+- name: Set auto-support proxy delivery method
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ method: https
+ routing_type: proxy
+ proxy:
+ host: 192.168.1.1
+ port: 1000
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current['json']['delivery']['method'] == 'https' and
+ current['json']['delivery']['proxyHost'] == '192.168.1.1' and
+ current['json']['delivery']['proxyPort'] == 1000 }}"
+ msg: "Delievery method should be https-proxy-host!"
+
+- name: Set auto-support proxy-script delivery method
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ method: https
+ routing_type: script
+ proxy:
+ script: autosupport_script.sh
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current['json']['delivery']['method'] == 'https' and
+ current['json']['delivery']['proxyScript'] == 'autosupport_script.sh' }}"
+ msg: "Delievery method should be https-proxy-script!"
+
+- name: Set auto-support email delivery method
+ na_santricity_asup:
+ <<: *creds
+ state: enabled
+ method: email
+ email:
+ server: server@example.com
+ sender: noreply@example.com
+- name: Collect auto-support state information from the array
+ uri:
+ url: "{{ base_url }}device-asup"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+- name: Validate auto-support expected state
+ assert:
+ that: "{{ current['json']['delivery']['method'] == 'smtp' and
+ current['json']['delivery']['mailRelayServer'] == 'server@example.com' and
+ current['json']['delivery']['mailSenderAddress'] == 'noreply@example.com' }}"
+ msg: "Delievery method should be email!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auditlog/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auditlog/tasks/main.yml
new file mode 100644
index 000000000..424ba2e55
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auditlog/tasks/main.yml
@@ -0,0 +1,220 @@
+# Test code for the na_santricity_auditlog module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# Note: If the audit log is full, clear it before testing; otherwise it can result in unexpected 422 SYMbol errors.
+- name: Set credential facts
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ proxy_credentials: &proxy_creds
+ ssid: "PROXY"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ proxy_embedded_credentials: &proxy_embedded_creds
+ ssid: "{{ proxy_ssid }}"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+
+- name: Set audit log settings to the defaults
+ na_santricity_auditlog:
+ <<: *creds
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/audit-log/config"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ config['json']['auditLogMaxRecords'] == 50000 and
+ config['json']['auditLogLevel'] == 'writeOnly' and
+ config['json']['auditLogFullPolicy'] == 'overWrite' and
+ config['json']['auditLogWarningThresholdPct'] == 90 }}"
+ msg: "Config settings are not correct!"
+
+- name: Change audit log settings. (change, check_mode)
+ na_santricity_auditlog:
+ <<: *creds
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ register: result
+ check_mode: true
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/audit-log/config"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ result['changed'] and config['json']['auditLogMaxRecords'] == 50000 and
+ config['json']['auditLogLevel'] == 'writeOnly' and
+ config['json']['auditLogFullPolicy'] == 'overWrite' and
+ config['json']['auditLogWarningThresholdPct'] == 90 }}"
+ msg: "Config settings are not correct!"
+
+- name: Change audit log settings. (change)
+ na_santricity_auditlog:
+ <<: *creds
+ max_records: 10000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ register: result
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/audit-log/config"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ result['changed'] and config['json']['auditLogMaxRecords'] == 10000 and
+ config['json']['auditLogLevel'] == 'all' and
+ config['json']['auditLogFullPolicy'] == 'preventSystemAccess' and
+ config['json']['auditLogWarningThresholdPct'] == 60 }}"
+ msg: "Config settings are not correct!"
+
+- name: Set audit log settings to the defaults (proxy)
+ na_santricity_auditlog:
+ <<: *proxy_creds
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ proxy_base_url }}audit-log/config"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ config['json']['auditLogMaxRecords'] == 50000 and
+ config['json']['auditLogLevel'] == 'writeOnly' and
+ config['json']['auditLogFullPolicy'] == 'overWrite' and
+ config['json']['auditLogWarningThresholdPct'] == 90 }}"
+ msg: "Config settings are not correct!"
+
+- name: Change audit log settings. (proxy) (change, check_mode)
+ na_santricity_auditlog:
+ <<: *proxy_creds
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ register: result
+ check_mode: true
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ proxy_base_url }}audit-log/config"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ result['changed'] and config['json']['auditLogMaxRecords'] == 50000 and
+ config['json']['auditLogLevel'] == 'writeOnly' and
+ config['json']['auditLogFullPolicy'] == 'overWrite' and
+ config['json']['auditLogWarningThresholdPct'] == 90 }}"
+ msg: "Config settings are not correct!"
+
+- name: Change audit log settings. (proxy) (change)
+ na_santricity_auditlog:
+ <<: *proxy_creds
+ max_records: 10000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ register: result
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ proxy_base_url }}audit-log/config"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ result['changed'] and config['json']['auditLogMaxRecords'] == 10000 and
+ config['json']['auditLogLevel'] == 'all' and
+ config['json']['auditLogFullPolicy'] == 'preventSystemAccess' and
+ config['json']['auditLogWarningThresholdPct'] == 60 }}"
+ msg: "Config settings are not correct!"
+
+- name: Set audit log settings to the defaults (proxy)
+ na_santricity_auditlog:
+ <<: *proxy_embedded_creds
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/audit-log/config"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ config['json']['auditLogMaxRecords'] == 50000 and
+ config['json']['auditLogLevel'] == 'writeOnly' and
+ config['json']['auditLogFullPolicy'] == 'overWrite' and
+ config['json']['auditLogWarningThresholdPct'] == 90 }}"
+ msg: "Config settings are not correct!"
+
+- name: Change audit log settings. (proxy) (change, check_mode)
+ na_santricity_auditlog:
+ <<: *proxy_embedded_creds
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ register: result
+ check_mode: true
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/audit-log/config"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ result['changed'] and config['json']['auditLogMaxRecords'] == 50000 and
+ config['json']['auditLogLevel'] == 'writeOnly' and
+ config['json']['auditLogFullPolicy'] == 'overWrite' and
+ config['json']['auditLogWarningThresholdPct'] == 90 }}"
+ msg: "Config settings are not correct!"
+
+- name: Change audit log settings. (proxy) (change)
+ na_santricity_auditlog:
+ <<: *proxy_embedded_creds
+ max_records: 10000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ register: result
+- name: Retrieve current auditlog config settings
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/audit-log/config"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: config
+- name: Validate change status
+ assert:
+ that: "{{ result['changed'] and config['json']['auditLogMaxRecords'] == 10000 and
+ config['json']['auditLogLevel'] == 'all' and
+ config['json']['auditLogFullPolicy'] == 'preventSystemAccess' and
+ config['json']['auditLogWarningThresholdPct'] == 60 }}"
+ msg: "Config settings are not correct!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auth/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auth/tasks/main.yml
new file mode 100644
index 000000000..12c552520
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_auth/tasks/main.yml
@@ -0,0 +1,170 @@
+# Test code for the na_santricity_auth module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# Clear embedded, legacy, and passwords before executing integration tests!
+
+- name: Set initial credential variables
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ proxy_credentials: &proxy_creds
+ ssid: "{{ proxy_ssid }}"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ proxy_legacy_credentials: &proxy_legacy_creds
+ ssid: "{{ proxy_legacy_ssid }}"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+
+# TODO: series of tests for embedded
+# Validate admin passwords are updated regardless of supplied api_password and current_admin_password options
+- name: Set storage system's initial admin password (embedded, changed)
+ na_santricity_auth:
+ <<: *creds
+ minimum_password_length: 8
+ password: infiniti
+ user: admin
+
+- name: Set storage system's non-admin passwords (embedded, changed)
+ na_santricity_auth:
+ <<: *creds
+ password: "{{ item }}_password"
+ user: "{{ item }}"
+ ignore_errors: true
+ loop: ["monitor", "support", "security", "storage"]
+
+- name: Clear storage system's admin password (embedded, changed)
+ na_santricity_auth:
+ <<: *creds
+ minimum_password_length: 0
+ password: ""
+ user: admin
+
+- name: Set storage system's minimum password length (embedded, changed)
+ na_santricity_auth:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: ""
+ validate_certs: "{{ validate_cert }}"
+ minimum_password_length: 8
+
+- name: Set proxy's initial password (proxy, changed)
+ na_santricity_auth:
+ ssid: proxy
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ password: infiniti
+ user: admin
+
+# # TODO: series of tests for proxy
+- name: Add storage systems to proxy without passwords
+ na_santricity_proxy_systems:
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ subnet_mask: "{{ proxy_discover_subnet }}"
+ systems: "{{ systems }}"
+ password: ""
+
+# Validate proxy system's admin passwords are updated regardless of current_admin_password options
+- name: Set storage system's initial password (proxy system with embedded, changed)
+ na_santricity_auth:
+ <<: *proxy_creds
+ minimum_password_length: 8
+ current_admin_password: "" # THIS NEEDS TO MATCH STORAGE SYSTEM'S STORED-PASSWORD
+ password: infiniti
+ user: admin
+
+- name: Set storage system's initial password (proxy system without embedded, changed)
+ na_santricity_auth:
+ <<: *proxy_legacy_creds
+ minimum_password_length: 8
+ current_admin_password: "" # THIS NEEDS TO MATCH LEGACY STORAGE SYSTEM'S STORED-PASSWORD
+ password: infiniti
+ user: admin
+
+- pause: seconds=10
+
+- name: Set storage system's non-admin passwords (proxy system with embedded, changed)
+ na_santricity_auth:
+ ssid: "10"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ current_admin_password: infiniti # THIS NEEDS TO MATCH STORAGE SYSTEM'S STORED-PASSWORD
+ password: "{{ item }}_password"
+ user: "{{ item }}"
+ loop: ["monitor", "support", "security", "storage"]
+
+- name: Clear storage system's admin password (proxy system with embedded, changed)
+ na_santricity_auth:
+ ssid: "10"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ current_admin_password: infiniti # THIS NEEDS TO MATCH STORAGE SYSTEM'S STORED-PASSWORD
+ minimum_password_length: 0
+ password: ""
+ user: admin
+
+- name: Clear storage system's admin password (proxy system without embedded, changed)
+ na_santricity_auth:
+ ssid: "20"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ current_admin_password: infiniti # THIS NEEDS TO MATCH STORAGE SYSTEM'S STORED-PASSWORD
+ password: ""
+ user: admin
+
+- name: Clear proxy's admin password (changed)
+ na_santricity_auth:
+ ssid: proxy
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ minimum_password_length: 0
+ password: ""
+ user: admin
+
+- name: Set proxy's minimum password length (changed)
+ na_santricity_auth:
+ ssid: Proxy
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "" # THIS NEEDS TO MATCH PROXY'S PASSWORD
+ validate_certs: "{{ proxy_validate_cert }}"
+ minimum_password_length: 8
+
+- name: Set storage system's minimum password length (proxy system with embedded, changed)
+ na_santricity_auth:
+ ssid: "10"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "" # THIS NEEDS TO MATCH PROXY'S PASSWORD
+ validate_certs: "{{ proxy_validate_cert }}"
+ minimum_password_length: 8
+
+- name: Remove storage system from proxy
+ na_santricity_proxy_systems:
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "" # THIS NEEDS TO MATCH PROXY'S PASSWORD
+ validate_certs: "{{ proxy_validate_cert }}"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_client_certificate/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_client_certificate/tasks/main.yml
new file mode 100644
index 000000000..9f3964d96
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_client_certificate/tasks/main.yml
@@ -0,0 +1,55 @@
+# Test code for the na_santricity_client_certificate module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set credential facts
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ certificates:
+ - "/home/swartzn/ExampleRootCA.crt"
+ - "/home/swartzn/ExampleIssuingCA.crt"
+ - "/home/swartzn/ExampleClient.crt"
+
+- name: Remove certificates
+ na_santricity_client_certificate:
+ <<: *creds
+
+- name: Upload certificate (changed, check_mode)
+ na_santricity_client_certificate:
+ <<: *creds
+ certificates: "{{ certificates }}"
+ register: result
+ check_mode: true
+- assert:
+ that: "{{ result['changed'] }}"
+ msg: "Failed to upload certificates to storage array."
+
+- name: Upload certificate (changed)
+ na_santricity_client_certificate:
+ <<: *creds
+ certificates: "{{ certificates }}"
+ register: result
+- assert:
+ that: "{{ result['changed'] }}"
+ msg: "Failed to upload certificates to storage array."
+
+- name: Repeat upload certificate (no change)
+ na_santricity_client_certificate:
+ <<: *creds
+ certificates: "{{ certificates }}"
+ register: result
+- assert:
+ that: "{{ not result['changed'] }}"
+ msg: "Failed not to make any changes."
+
+- name: Remove certificates
+ na_santricity_client_certificate:
+ <<: *creds
+ register: result
+- assert:
+ that: "{{ result['changed'] }}"
+ msg: "Failed to remove uploaded certificates" \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_discover/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_discover/tasks/main.yml
new file mode 100644
index 000000000..38c18f977
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_discover/tasks/main.yml
@@ -0,0 +1,64 @@
+# Test code for the na_santricity_discover module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+- name: Discover storage systems using SANtricity Web Services Proxy
+ na_santricity_discover:
+ proxy_url: "{{ proxy_base_url }}"
+ proxy_username: "{{ proxy_username }}"
+ proxy_password: "{{ proxy_password }}"
+ proxy_validate_certs: "{{ proxy_validate_cert }}"
+ subnet_mask: "{{ proxy_discover_subnet }}"
+ prefer_embedded: false
+ register: systems
+- name: find storage system
+ set_fact:
+ api_url: |-
+ {%- for system_serial in (systems["systems_found"].keys() | list) -%}
+ {%- if system_serial == expected_serial_with_proxy_legacy %}
+ {{- systems["systems_found"][system_serial]["api_urls"][0] -}}
+ {%- endif -%}
+ {%- endfor -%}
+- name: Verify storage system is found
+ fail:
+ msg: "Storage system was not discovered"
+ when: api_url == "" or api_url != proxy_base_url
+
+- name: Discover storage systems using SANtricity Web Services Proxy with a preference for embedded url
+ na_santricity_discover:
+ proxy_url: "{{ proxy_base_url }}"
+ proxy_username: "{{ proxy_username }}"
+ proxy_password: "{{ proxy_password }}"
+ proxy_validate_certs: "{{ proxy_validate_cert }}"
+ subnet_mask: "{{ proxy_discover_subnet }}"
+ prefer_embedded: true
+ register: systems
+- name: find storage system
+ set_fact:
+ api_url: |-
+ {%- for system_serial in (systems["systems_found"].keys() | list) -%}
+ {%- if system_serial == expected_serial_with_proxy_embedded %}
+ {{- systems["systems_found"][system_serial]["api_urls"][0] -}}
+ {%- endif -%}
+ {%- endfor -%}
+- name: Verify storage system is found
+ fail:
+ msg: "Storage system was not discovered"
+ when: api_url == "" or api_url == proxy_base_url
+
+- name: Discover storage systems not using SANtricity Web Services Proxy (requires SANtricity version 11.60.2 or later)
+ na_santricity_discover:
+ subnet_mask: "{{ proxy_discover_subnet }}"
+ register: systems
+- name: find storage system
+ set_fact:
+ api_url: |-
+ {%- for system_serial in (systems["systems_found"].keys() | list) -%}
+ {%- if system_serial == expected_serial_without_proxy %}
+ {{- systems["systems_found"][system_serial]["api_urls"][0] -}}
+ {%- endif -%}
+ {%- endfor -%}
+- name: Verify storage system is found
+ fail:
+ msg: "Storage system was not discovered"
+ when: api_url == ""
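
The set_fact loops above index the module result by chassis serial number, so systems_found is presumably a mapping of serial to discovery details that include the reachable API URLs. A hypothetical, abbreviated shape inferred from how it is used here (serials taken from integration_config.yml, URLs are placeholders):

    systems_found:
      "021628012345":
        api_urls:
          - https://192.168.1.110:8443/devmgr/v2/
      "711214012345":
        api_urls:
          - https://192.168.1.200:8443/devmgr/v2/
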
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_drive_firmware/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_drive_firmware/tasks/main.yml
new file mode 100644
index 000000000..5559691dc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_drive_firmware/tasks/main.yml
@@ -0,0 +1,185 @@
+# Test code for the na_santricity_drive_firmware module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# Known SYMbol issue: occasionally SYMbol will return a 422, which causes Ansible to fail; however, the drive firmware download will still complete.
+# Work-around: Remove all storage provisioning before commencing test.
+
+- name: Set necessary credentials and other facts.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ firmware:
+ downgrade:
+ list:
+ - "/home/swartzn/Downloads/drive firmware/D_PX04SVQ160_DOWNGRADE_MS00toMSB6_801.dlp"
+ - "/home/swartzn/Downloads/drive firmware/D_ST1200MM0017_DNGRADE_MS02toMS00_6600_802.dlp"
+ check:
+ - firmware: "D_PX04SVQ160_DOWNGRADE_MS00toMSB6_801.dlp"
+ drive: "PX04SVQ160"
+ version: "MSB6"
+ - firmware: "D_ST1200MM0017_DNGRADE_MS02toMS00_6600_802.dlp"
+ drive: "ST1200MM0017"
+ version: "MS00"
+ upgrade:
+ list:
+ - "/home/swartzn/Downloads/drive firmware/D_PX04SVQ160_30603183_MS00_6600_001.dlp"
+ - "/home/swartzn/Downloads/drive firmware/D_ST1200MM0017_30602214_MS02_5600_002.dlp"
+ check:
+ - firmware: "D_PX04SVQ160_30603183_MS00_6600_001.dlp"
+ drive: "PX04SVQ160"
+ version: "MS00"
+ - firmware: "D_ST1200MM0017_30602214_MS02_5600_002.dlp"
+ drive: "ST1200MM0017"
+ version: "MS02"
+
+- name: Set drive firmware (baseline, maybe change)
+ na_santricity_drive_firmware:
+ <<: *creds
+ firmware: "{{ firmware['downgrade']['list'] }}"
+ wait_for_completion: true
+ ignore_inaccessible_drives: true
+ upgrade_drives_online: false
+ register: drive_firmware
+- pause: seconds=5
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/drives"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_drive_firmware
+- name: Check if drive firmware is the expected versions
+ assert:
+ that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
+ (firmware['downgrade']['check'][0]['drive'] == item['productID'].strip() and
+ firmware['downgrade']['check'][0]['version'] == item['softwareVersion']) or
+ (firmware['downgrade']['check'][1]['drive'] == item['productID'].strip() and
+ firmware['downgrade']['check'][1]['version'] == item['softwareVersion']) }}"
+ msg: "Drive firmware failed to update all drives"
+ loop: "{{ lookup('list', current_drive_firmware['json']) }}"
+
+- name: Set drive firmware (upgrade, change-checkmode)
+ na_santricity_drive_firmware:
+ <<: *creds
+ firmware: "{{ firmware['upgrade']['list'] }}"
+ wait_for_completion: true
+ ignore_inaccessible_drives: true
+ upgrade_drives_online: false
+ register: drive_firmware
+ check_mode: true
+- pause: seconds=5
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/drives"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_drive_firmware
+- name: Validate change status
+ assert:
+ that: "{{ drive_firmware.changed }}"
+ msg: "Change status is incorrect."
+- name: Check if drive firmware is the expected versions
+ assert:
+ that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
+ (firmware['downgrade']['check'][0]['drive'] == item['productID'].strip() and
+ firmware['downgrade']['check'][0]['version'] == item['softwareVersion']) or
+ (firmware['downgrade']['check'][1]['drive'] == item['productID'].strip() and
+ firmware['downgrade']['check'][1]['version'] == item['softwareVersion']) }}"
+ msg: "Drive firmware failed to update all drives"
+ loop: "{{ lookup('list', current_drive_firmware['json']) }}"
+
+- name: Set drive firmware (upgrade, change)
+ na_santricity_drive_firmware:
+ <<: *creds
+ firmware: "{{ firmware['upgrade']['list'] }}"
+ wait_for_completion: true
+ ignore_inaccessible_drives: true
+ upgrade_drives_online: false
+ register: drive_firmware
+- pause: seconds=5
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/drives"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_drive_firmware
+- name: Validate change status
+ assert:
+ that: "{{ drive_firmware.changed }}"
+ msg: "Change status is incorrect."
+- name: Check if drive firmware is the expected versions
+ assert:
+ that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
+ (firmware['upgrade']['check'][0]['drive'] == item['productID'].strip() and
+ firmware['upgrade']['check'][0]['version'] == item['softwareVersion']) or
+ (firmware['upgrade']['check'][1]['drive'] == item['productID'].strip() and
+ firmware['upgrade']['check'][1]['version'] == item['softwareVersion']) }}"
+ msg: "Drive firmware failed to update all drives"
+ loop: "{{ lookup('list', current_drive_firmware['json']) }}"
+
+- name: Set drive firmware (upgrade, no change)
+ na_santricity_drive_firmware:
+ <<: *creds
+ firmware: "{{ firmware['upgrade']['list'] }}"
+ wait_for_completion: true
+ ignore_inaccessible_drives: true
+ upgrade_drives_online: false
+ register: drive_firmware
+- pause: seconds=5
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/drives"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_drive_firmware
+- name: Validate change status
+ assert:
+ that: "{{ not drive_firmware.changed }}"
+ msg: "Change status is incorrect."
+- name: Check if drive firmware is the expected versions
+ assert:
+ that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
+ (firmware['upgrade']['check'][0]['drive'] == item['productID'].strip() and
+ firmware['upgrade']['check'][0]['version'] == item['softwareVersion']) or
+ (firmware['upgrade']['check'][1]['drive'] == item['productID'].strip() and
+ firmware['upgrade']['check'][1]['version'] == item['softwareVersion']) }}"
+ msg: "Drive firmware failed to update all drives"
+ loop: "{{ lookup('list', current_drive_firmware['json']) }}"
+
+- name: Set drive firmware (downgrade, change)
+ netapp_e_drive_firmware:
+ <<: *creds
+ firmware: "{{ firmware['downgrade']['list'] }}"
+ wait_for_completion: true
+ ignore_inaccessible_drives: true
+ upgrade_drives_online: false
+ register: drive_firmware
+- pause: seconds=5
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/drives"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_drive_firmware
+- name: Validate change status
+ assert:
+ that: "{{ drive_firmware.changed }}"
+ msg: "Change status is incorrect."
+- name: Check if drive firmware is the expected versions
+ assert:
+ that: "{{ (item['productID'].strip() not in [firmware['downgrade']['check'][0]['drive'], firmware['downgrade']['check'][1]['drive']]) or
+ (firmware['downgrade']['check'][0]['drive'] == item['productID'].strip() and
+ firmware['downgrade']['check'][0]['version'] == item['softwareVersion']) or
+ (firmware['downgrade']['check'][1]['drive'] == item['productID'].strip() and
+ firmware['downgrade']['check'][1]['version'] == item['softwareVersion']) }}"
+ msg: "Drive firmware failed to update all drives"
+ loop: "{{ lookup('list', current_drive_firmware['json']) }}"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_facts/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_facts/tasks/main.yml
new file mode 100644
index 000000000..14cc43c62
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_facts/tasks/main.yml
@@ -0,0 +1,19 @@
+# Test code for the na_santricity_facts module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+- name: Retrieve facts from SANtricity Web Services Embedded
+ na_santricity_facts:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+- name: Retrieve facts from SANtricity Web Services Proxy
+ na_santricity_facts:
+ ssid: "{{ proxy_ssid }}"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}" \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_legacy_tests.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_legacy_tests.yml
new file mode 100644
index 000000000..6aff714cc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_legacy_tests.yml
@@ -0,0 +1,128 @@
+# Test code for the na_santricity_firmware module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# TODO: MUST BE DOWNGRADED BEFORE EXECUTING THE INTEGRATION TESTS TO RCB_11.40.3R2_280x_5c7d81b3.dlp and N280X-842834-D02.dlp
+# loadControllerFirmware_MT swartzn@10.113.1.250 /home/swartzn/Downloads/RCB_11.40.3R2_280x_5c7d81b3.dlp /home/swartzn/Downloads/N280X-842834-D02.dlp
+
+# This integration test will validate upgrade functionality for firmware-only, firmware-and-nvsram, and check mode.
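+# The tasks below rely on a handful of variables (ssid, base_url, username, password, validate_cert and their
+# proxy_* counterparts) that appear to be supplied by tests/integration/integration_config.yml. A minimal sketch
+# of that file, using placeholder values only (actual addresses and credentials are site-specific), might look
+# like:
+#
+# ssid: "1"
+# base_url: "https://192.168.1.100:8443/devmgr/v2/"
+# username: "admin"
+# password: "adminpass"
+# validate_cert: false
+# proxy_ssid: "10"
+# proxy_legacy_ssid: "20"
+# proxy_base_url: "https://192.168.1.200:8443/devmgr/v2/"
+# proxy_username: "admin"
+# proxy_password: "adminpass"
+# proxy_validate_cert: false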
+- name: Set credentials and other facts
+ set_fact:
+ proxy_credentials: &proxy_creds
+ ssid: "{{ proxy_legacy_ssid }}"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ path: "/home/swartzn/Downloads/"
+ upgrades:
+ - firmware: "RC_08405000_m3_e10_840_5600.dlp"
+ nvsram: "N5600-840834-D03.dlp"
+ expected_firmware_version: "08.40.50.00"
+ expected_nvsram_version: "N5600-840834-D03"
+ - firmware: "RC_08403000_m3_e10_840_5600.dlp"
+ nvsram: "N5600-840834-D03.dlp"
+ expected_firmware_version: "08.40.30.00"
+ expected_nvsram_version: "N5600-840834-D03"
+
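+# Note: the credentials above are defined once with a YAML anchor (&proxy_creds) and merged into each task with
+# the "<<: *proxy_creds" merge key, so every task receives the same ssid/api_url/api_username/api_password/
+# validate_certs arguments without repeating them. A task using the anchor is equivalent to (illustrative sketch
+# only):
+#
+# - name: Example firmware task
+#   na_santricity_firmware:
+#     ssid: "{{ proxy_legacy_ssid }}"
+#     api_url: "{{ proxy_base_url }}"
+#     api_username: "{{ proxy_username }}"
+#     api_password: "{{ proxy_password }}"
+#     validate_certs: "{{ proxy_validate_cert }}"
+#     firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"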
+- name: Perform firmware upgrade using the Web Services Proxy (changed, firmware)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[1]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[1]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_legacy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_legacy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[1]['expected_firmware_version'] }}"
+ msg: "Failed to change the firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[1]['expected_nvsram_version'] }}"
+ msg: "Failed to change the nvsram version."
+
+- name: Perform firmware upgrade using the Web Services Proxy (check_mode, changed, firmware)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+ check_mode: true
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_legacy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_legacy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[1]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[1]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version."
+
+- name: Perform firmware upgrade using the Web Services Proxy (changed, firmware)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_legacy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_legacy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Failed to change the firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Failed to change the nvsram version."
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_tests.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_tests.yml
new file mode 100644
index 000000000..99827e1bb
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/firmware_tests.yml
@@ -0,0 +1,320 @@
+# Test code for the na_santricity_firmware module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# TODO: MUST BE DOWNGRADED BEFORE EXECUTING THE INTEGRATION TESTS TO RCB_11.40.3R2_280x_5c7d81b3.dlp and N280X-842834-D02.dlp
+# loadControllerFirmware_MT swartzn@10.113.1.250 /home/swartzn/Downloads/RCB_11.40.3R2_280x_5c7d81b3.dlp /home/swartzn/Downloads/N280X-842834-D02.dlp
+
+# This integration test will validate upgrade functionality for firmware-only, firmware-and-nvsram, and check mode.
+- name: Set credentials and other facts
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ proxy_credentials: &proxy_creds
+ ssid: "{{ proxy_ssid }}"
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ path: "/home/swartzn/Downloads/"
+ upgrades:
+ - firmware: "RCB_11.40.3R2_280x_5c7d81b3.dlp"
+ nvsram: "N280X-842834-D02.dlp"
+ expected_firmware_version: "08.42.30.05"
+ expected_nvsram_version: "N280X-842834-D02"
+ - firmware: "RCB_11.40.5_280x_5ceef00e.dlp"
+ nvsram: "N280X-842834-D02.dlp"
+ expected_firmware_version: "08.42.50.00"
+ expected_nvsram_version: "N280X-842834-D02"
+ - firmware: "RCB_11.50.2_280x_5ce8501f.dlp"
+ nvsram: "N280X-852834-D02.dlp"
+ expected_firmware_version: "08.52.00.00"
+ expected_nvsram_version: "N280X-852834-D02"
+
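+# Each upgrade below is verified by querying the array graph through the SANtricity Web Services xpath-filter
+# endpoint. The endpoint returns a JSON list, so the registered result is read as current_firmware['json'][0].
+# A sketch of the verification call and the shape of the data it is assumed to return:
+#
+# - uri:
+#     url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+#     user: "{{ username }}"
+#     password: "{{ password }}"
+#     validate_certs: no
+#   register: current_firmware
+# # current_firmware['json'] is then expected to look like: ["08.42.30.05"]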
+- name: Perform firmware upgrade using the Web Services REST API (checkmode-no change, firmware only)
+ na_santricity_firmware:
+ <<: *creds
+ nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ check_mode: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == False }}"
+ msg: "Failed to return unchanged."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version."
+
+- name: Perform firmware upgrade using the Web Services REST API (no change, firmware only)
+ na_santricity_firmware:
+ <<: *creds
+ nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == False }}"
+ msg: "Failed to return unchanged."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version."
+
+- name: Perform firmware upgrade using the Web Services REST API (checkmode-change, firmware)
+ na_santricity_firmware:
+ <<: *creds
+ nvsram: "{{ path }}{{ upgrades[1]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[1]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+ check_mode: true
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version."
+
+- name: Perform firmware upgrade using the Web Services REST API (change, firmware)
+ na_santricity_firmware:
+ <<: *creds
+ nvsram: "{{ path }}{{ upgrades[1]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[1]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[1]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version. {{ current_firmware['json'][0] }} != {{ upgrades[1]['expected_firmware_version'] }}"
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[1]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version. {{ current_nvsram['json'][0] }} != {{ upgrades[1]['expected_nvsram_version'] }}"
+
+- name: Perform firmware upgrade using the Web Services Proxy (changed, firmware)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Failed to change the firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Failed to change the nvsram version."
+
+- name: Perform firmware upgrade using the Web Services Proxy (checkmode-unchanged, firmware)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[0]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[0]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ check_mode: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == False }}"
+ msg: "Failed to return unchanged."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version."
+
+- name: Perform firmware upgrade using the Web Services Proxy (checkmode-change, firmware and nvsram)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[2]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[2]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ check_mode: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[0]['expected_firmware_version'] }}"
+ msg: "Unexpected firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[0]['expected_nvsram_version'] }}"
+ msg: "Unexpected nvsram version."
+
+- name: Perform firmware upgrade using the Web Services Proxy (changed, firmware and nvsram)
+ na_santricity_firmware:
+ <<: *proxy_creds
+ nvsram: "{{ path }}{{ upgrades[2]['nvsram'] }}"
+ firmware: "{{ path }}{{ upgrades[2]['firmware'] }}"
+ wait_for_completion: true
+ clear_mel_events: true
+ register: results
+- name: Retrieve current firmware version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/fwVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_firmware
+- name: Retrieve current nvsram version
+ uri:
+ url: "{{ proxy_base_url }}storage-systems/{{ proxy_ssid }}/graph/xpath-filter?query=/sa/saData/nvsramVersion"
+ user: "{{ proxy_username }}"
+ password: "{{ proxy_password }}"
+ validate_certs: no
+ register: current_nvsram
+- name: Verify change status
+ assert:
+ that: "{{ results.changed == True }}"
+ msg: "Failed to return changed."
+- name: Verify current firmware version
+ assert:
+ that: "{{ current_firmware['json'][0] == upgrades[2]['expected_firmware_version'] }}"
+ msg: "Failed to change the firmware version."
+- name: Verify current nvsram version
+ assert:
+ that: "{{ current_nvsram['json'][0] == upgrades[2]['expected_nvsram_version'] }}"
+ msg: "Failed to change the nvsram version."
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/main.yml
new file mode 100644
index 000000000..15edc5200
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_firmware/tasks/main.yml
@@ -0,0 +1,2 @@
+- include_tasks: firmware_tests.yml
+- include_tasks: firmware_legacy_tests.yml
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_global/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_global/tasks/main.yml
new file mode 100644
index 000000000..9d6e6df92
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_global/tasks/main.yml
@@ -0,0 +1,185 @@
+# Test code for the na_santricity_global module.
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- include_vars: "../../integration_config.yml"
+
+- name: Set initial global settings
+ na_santricity_global:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ name: arrayname01
+ cache_block_size: 32768
+ cache_flush_threshold: 80
+ automatic_load_balancing: disabled
+ host_connectivity_reporting: disabled
+ default_host_type: linux dm-mp
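+# Note: the assertions below compare the requested default_host_type against the array's numeric host type index
+# (defaultHostTypeIndex); in these tests "linux dm-mp" is expected to resolve to index 28 and "windows" to index 1,
+# but the exact mapping comes from the array's host type table and may differ on other firmware levels.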
+- name: Retrieve the current array graph
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa"
+ register: graph
+- name: Validate initial global settings
+ assert:
+ that: "{{ graph['json'][0]['saData']['storageArrayLabel'] == 'arrayname01' and
+ graph['json'][0]['cache']['cacheBlkSize'] == 32768 and
+ graph['json'][0]['cache']['demandFlushThreshold'] == 80 and
+ not graph['json'][0]['autoLoadBalancingEnabled'] and
+ not graph['json'][0]['hostConnectivityReportingEnabled'] and
+ graph['json'][0]['defaultHostTypeIndex'] == 28 }}"
+ msg: "Failed to set initial global settings"
+
+- name: Repeat initial global settings
+ na_santricity_global:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ name: arrayname01
+ cache_block_size: 32768
+ cache_flush_threshold: 80
+ automatic_load_balancing: disabled
+ host_connectivity_reporting: disabled
+ default_host_type: linux dm-mp
+ register: result
+- name: Retrieve the current array graph
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa"
+ register: graph
+- name: Validate initial global settings
+ assert:
+ that: "{{ not result.changed and
+ graph['json'][0]['saData']['storageArrayLabel'] == 'arrayname01' and
+ graph['json'][0]['cache']['cacheBlkSize'] == 32768 and
+ graph['json'][0]['cache']['demandFlushThreshold'] == 80 and
+ not graph['json'][0]['autoLoadBalancingEnabled'] and
+ not graph['json'][0]['hostConnectivityReportingEnabled'] and
+ graph['json'][0]['defaultHostTypeIndex'] == 28 }}"
+ msg: "Failed to set initial global settings"
+
+- name: Change global settings (check-mode)
+ na_santricity_global:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ name: arrayname02
+ cache_block_size: 8192
+ cache_flush_threshold: 60
+ automatic_load_balancing: disabled
+ host_connectivity_reporting: disabled
+ default_host_type: windows
+ check_mode: true
+ register: result
+- name: Retrieve the current array graph
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa"
+ register: graph
+- name: Validate initial global settings
+ assert:
+ that: "{{ result.changed and
+ graph['json'][0]['saData']['storageArrayLabel'] == 'arrayname01' and
+ graph['json'][0]['cache']['cacheBlkSize'] == 32768 and
+ graph['json'][0]['cache']['demandFlushThreshold'] == 80 and
+ not graph['json'][0]['autoLoadBalancingEnabled'] and
+ not graph['json'][0]['hostConnectivityReportingEnabled'] and
+ graph['json'][0]['defaultHostTypeIndex'] == 28 }}"
+ msg: "Failed to set initial global settings"
+
+- name: Change global settings
+ na_santricity_global:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ name: arrayname02
+ cache_block_size: 8192
+ cache_flush_threshold: 60
+ automatic_load_balancing: disabled
+ host_connectivity_reporting: disabled
+ default_host_type: windows
+ register: result
+- name: Retrieve the current array graph
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa"
+ register: graph
+- name: Validate initial global settings
+ assert:
+ that: "{{ result.changed and
+ graph['json'][0]['saData']['storageArrayLabel'] == 'arrayname02' and
+ graph['json'][0]['cache']['cacheBlkSize'] == 8192 and
+ graph['json'][0]['cache']['demandFlushThreshold'] == 60 and
+ not graph['json'][0]['autoLoadBalancingEnabled'] and
+ not graph['json'][0]['hostConnectivityReportingEnabled'] and
+ graph['json'][0]['defaultHostTypeIndex'] == 1 }}"
+ msg: "Failed to set initial global settings"
+
+- name: Turn on autoload balancing which should force enable host connection reporting
+ na_santricity_global:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ automatic_load_balancing: enabled
+ register: result
+- name: Retrieve the current array graph
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa"
+ register: graph
+- name: Validate initial global settings
+ assert:
+ that: "{{ result.changed and
+ graph['json'][0]['saData']['storageArrayLabel'] == 'arrayname02' and
+ graph['json'][0]['cache']['cacheBlkSize'] == 8192 and
+ graph['json'][0]['cache']['demandFlushThreshold'] == 60 and
+ graph['json'][0]['autoLoadBalancingEnabled'] and
+ graph['json'][0]['hostConnectivityReportingEnabled'] and
+ graph['json'][0]['defaultHostTypeIndex'] == 1 }}"
+ msg: "Failed to set initial global settings"
+
+- name: Change array name only
+ na_santricity_global:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ name: arrayname03
+ register: result
+- name: Retrieve the current array graph
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/graph/xpath-filter?query=/sa"
+ register: graph
+- name: Validate initial global settings
+ assert:
+ that: "{{ result.changed and
+ graph['json'][0]['saData']['storageArrayLabel'] == 'arrayname03' and
+ graph['json'][0]['cache']['cacheBlkSize'] == 8192 and
+ graph['json'][0]['cache']['demandFlushThreshold'] == 60 and
+ graph['json'][0]['autoLoadBalancingEnabled'] and
+ graph['json'][0]['hostConnectivityReportingEnabled'] and
+ graph['json'][0]['defaultHostTypeIndex'] == 1 }}"
+ msg: "Failed to set initial global settings"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_host/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_host/tasks/main.yml
new file mode 100644
index 000000000..cb460a9ea
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_host/tasks/main.yml
@@ -0,0 +1,243 @@
+# Test code for the na_santricity_host module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for the na_santricity_host module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
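+# The ports lists below mix the accepted initiator formats: iSCSI ports are identified by an IQN, while FC ports
+# may be given either as a hex WWPN string ("0x1122334455667788") or in colon-separated form
+# ("01:23:45:67:89:1a:bc:de"). The case-insensitivity task further down re-submits the same WWPNs in uppercase
+# and expects no change.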
+- name: Create iSCSI host
+ na_santricity_host:
+ <<: *creds
+ name: windows_iscsi_host
+ host_type: Windows
+ ports:
+ - type: iscsi
+ label: iscsi_p1
+ port: iqn.windows.host.com.1
+ - type: iscsi
+ label: iscsi_p2
+ port: iqn.windows.host.com.2
+
+- name: Create FC host
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc_host
+ host_type: Linux dm-mp
+ ports:
+ - type: fc
+ label: fc_p1
+ port: "0x1122334455667788"
+ - type: fc
+ label: fc_p2
+ port: "01:23:45:67:89:1a:bc:de"
+
+- name: Attempt to change FC host port using different port case (no change)
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc_host
+ host_type: Linux dm-mp
+ ports:
+ - type: FC
+ label: fc_p1
+ port: "0x1122334455667788"
+ - type: FC
+ label: fc_p2
+ port: "01:23:45:67:89:1A:BC:DE"
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Create iSCSI host (no change)
+ na_santricity_host:
+ <<: *creds
+ name: windows_iscsi_host
+ host_type: Windows
+ ports:
+ - type: iscsi
+ label: iscsi_p1
+ port: iqn.windows.host.com.1
+ - type: iscsi
+ label: iscsi_p2
+ port: iqn.windows.host.com.2
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Create FC host (no change)
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc_host
+ host_type: Linux dm-mp
+ ports:
+ - type: fc
+ label: fc_p1
+ port: "0x1122334455667788"
+ - type: fc
+ label: fc_p2
+ port: "01:23:45:67:89:1a:bc:de"
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Create FC host with a port already in use (change, check_mode)
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc2_host
+ host_type: Linux dm-mp
+ force_port: true
+ ports:
+ - type: fc
+ label: fc2_p1
+ port: "0x1122334455667788"
+ check_mode: true
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Change FC host name to uppercase (change)
+ na_santricity_host:
+ <<: *creds
+ name: Linux_FC_Host
+ host_type: Linux dm-mp
+ ports:
+ - type: fc
+ label: fc_p1
+ port: "0x1122334455667788"
+ - type: fc
+ label: fc_p2
+ port: "01:23:45:67:89:1a:bc:de"
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Change FC host port labels to uppercase (change)
+ na_santricity_host:
+ <<: *creds
+ name: Linux_FC_Host
+ host_type: Linux dm-mp
+ ports:
+ - type: fc
+ label: FC_P1
+ port: "0x1122334455667788"
+ - type: fc
+ label: FC_P2
+ port: "01:23:45:67:89:1a:bc:de"
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Create FC host with a port already in use (change)
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc2_host
+ host_type: Linux dm-mp
+ force_port: true
+ ports:
+ - type: fc
+ label: fc2_p1
+ port: "0x1122334455667788"
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Create FC host with a port already in use (no change)
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc2_host
+ host_type: Linux dm-mp
+ force_port: true
+ ports:
+ - type: fc
+ label: fc2_p1
+ port: "0x1122334455667788"
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete iSCSI host (changed)
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: windows_iscsi_host
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete FC host (changed)
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: Linux_FC_Host
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete second FC host (changed)
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: linux_fc2_host
+ register: results
+- name: Verify changes were made
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Delete iSCSI host (no change)
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: windows_iscsi_host
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Delete second FC host (no change)
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: Linux_FC_Host
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Delete FC host (no change)
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: linux_fc2_host
+ register: results
+- name: Verify no changes were made
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_hostgroup/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_hostgroup/tasks/main.yml
new file mode 100644
index 000000000..8a2af77dc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_hostgroup/tasks/main.yml
@@ -0,0 +1,137 @@
+# Test code for the na_santricity_hostgroup module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for the na_santricity_hostgroup module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+- name: Setup hosts for the groups
+ block:
+ - name: Create iSCSI host
+ na_santricity_host:
+ <<: *creds
+ name: windows_iscsi_host
+ host_type: Windows
+ ports:
+ - type: iscsi
+ label: iscsi_p1
+ port: iqn.windows.host.com.1
+ - type: iscsi
+ label: iscsi_p2
+ port: iqn.windows.host.com.2
+ - name: Create FC host
+ na_santricity_host:
+ <<: *creds
+ name: linux_fc_host
+ host_type: Linux dm-mp
+ ports:
+ - type: fc
+ label: fc_p1
+ port: "0x1122334455667788"
+ - type: fc
+ label: fc_p2
+ port: "01:23:45:67:89:1a:bc:de"
+
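+# Host group membership is declared with the complete desired list of host names: adding a name to "hosts" expands
+# the group and omitting a previously listed name removes that host from the group, which is what the add/remove
+# test pairs below exercise.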
+- name: Create host group and add one host (change)
+ na_santricity_hostgroup:
+ <<: *creds
+ name: hostgroup_test
+ hosts:
+ - windows_iscsi_host
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Create host group and add one host (no change)
+ na_santricity_hostgroup:
+ <<: *creds
+ name: hostgroup_test
+ hosts:
+ - windows_iscsi_host
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add one host (change, check_mode)
+ na_santricity_hostgroup:
+ <<: *creds
+ name: hostgroup_test
+ hosts:
+ - windows_iscsi_host
+ - linux_fc_host
+ register: results
+ check_mode: true
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add one host (change, check_mode)
+ na_santricity_hostgroup:
+ <<: *creds
+ name: hostgroup_test
+ hosts:
+ - windows_iscsi_host
+ - linux_fc_host
+ register: results
+ check_mode: true
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add one host (change)
+ na_santricity_hostgroup:
+ <<: *creds
+ name: hostgroup_test
+ hosts:
+ - windows_iscsi_host
+ - linux_fc_host
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Remove one host (change)
+ na_santricity_hostgroup:
+ <<: *creds
+ name: hostgroup_test
+ hosts:
+ - linux_fc_host
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete host group (change)
+ na_santricity_hostgroup:
+ <<: *creds
+ state: absent
+ name: hostgroup_test
+
+- name: Delete hosts for the groups
+ block:
+ - name: Delete iSCSI host
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: windows_iscsi_host
+ register: results
+
+ - name: Delete FC host
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: linux_fc_host
+ register: results
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ib_iser_interface/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ib_iser_interface/tasks/main.yml
new file mode 100644
index 000000000..d2d8142b4
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ib_iser_interface/tasks/main.yml
@@ -0,0 +1,88 @@
+# Test code for the na_santricity_ib_iser_interface module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for na_santricity_ib_iser_interface module test
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ interface_a1_ip: &a1_ip 192.168.1.101
+ interface_a2_ip: &a2_ip 192.168.2.101
+
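+# Each loop item below is a three-element list that maps positionally onto the module arguments:
+# item[0] -> controller, item[1] -> channel, item[2] -> address. The *a1_ip/*a2_ip aliases reuse the addresses
+# defined in the facts above so the final task can revert the interfaces to their initial values.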
+- name: Set the initial ib_iser interfaces
+ na_santricity_ib_iser_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ loop:
+ - ["A", "1", *a1_ip]
+ - ["B", "1", *a2_ip]
+
+- name: Repeat the initial ib_iser interfaces (no change)
+ na_santricity_ib_iser_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", *a1_ip]
+ - ["B", "1", *a2_ip]
+- name: Verify no changes were made
+ assert:
+ that: "{{ not item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}"
+
+- name: Change the initial ib_iser interfaces (changed, check_mode)
+ na_santricity_ib_iser_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", "192.168.3.230"]
+ - ["B", "1", "192.168.3.231"]
+ check_mode: true
+- name: Verify no changes were made
+ assert:
+ that: "{{ item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}"
+
+- name: Change the initial ib_iser interfaces (changed)
+ na_santricity_ib_iser_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", "192.168.3.230"]
+ - ["B", "1", "192.168.3.231"]
+- name: Verify no changes were made
+ assert:
+ that: "{{ item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}"
+
+- name: Revert to the initial ib_iser interfaces (changed)
+ na_santricity_ib_iser_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", *a1_ip]
+ - ["B", "1", *a2_ip]
+- name: Verify no changes were made
+ assert:
+ that: "{{ item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}" \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_interface/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_interface/tasks/main.yml
new file mode 100644
index 000000000..38b6faba1
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_interface/tasks/main.yml
@@ -0,0 +1,115 @@
+# Test code for the na_santricity_iscsi_interface module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for the na_santricity_iscsi_interface module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
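+# These tasks cycle port 1 on both controllers between DHCP and a static configuration. When config_method is
+# static, the address/subnet_mask/gateway values below are supplied explicitly, and mtu can be changed on its own
+# (the later 9000 MTU task changes only that setting).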
+- name: Set controller iSCSI interfaces to DHCP
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: "{{ item }}"
+ port: 1
+ config_method: dhcp
+ mtu: 1500
+ loop: ["A", "B"]
+
+- name: Set controller A iSCSI interface to static (change, check_mode)
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: A
+ port: 1
+ config_method: static
+ address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ mtu: 1500
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Set controller A iSCSI interface to static (change)
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: A
+ port: 1
+ config_method: static
+ address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ mtu: 1500
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Set controller A iSCSI interface to static (no change)
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: A
+ port: 1
+ config_method: static
+ address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ mtu: 1500
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Set controller B iSCSI interface to static (change)
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: B
+ port: 1
+ config_method: static
+ address: 192.168.1.200
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ mtu: 1500
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Set controller A iSCSI interface MTU to 9000 (change)
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: A
+ port: 1
+ config_method: static
+ address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ mtu: 9000
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Set controller iSCSI interfaces to DHCP
+ na_santricity_iscsi_interface:
+ <<: *creds
+ controller: "{{ item }}"
+ port: 1
+ config_method: dhcp
+ mtu: 1500
+ loop: ["A", "B"]
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_target/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_target/tasks/main.yml
new file mode 100644
index 000000000..b259ec878
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_iscsi_target/tasks/main.yml
@@ -0,0 +1,81 @@
+# Test code for the na_santricity_iscsi_target module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for the na_santricity_iscsi_target module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
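+# Note: the first task seeds a CHAP secret and the following task clears it; because the array is assumed not to
+# expose the current secret for comparison, secret handling is treated here as always producing a change, so the
+# check-mode and idempotency checks below rely on the ping and unnamed_discovery flags instead.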
+- name: Set initial iSCSI target state
+ na_santricity_iscsi_target:
+ <<: *creds
+ name: eseries_storage_iscsi_target
+ ping: false
+ unnamed_discovery: false
+ chap_secret: "chappySecret"
+
+- name: Clear chap secret
+ na_santricity_iscsi_target:
+ <<: *creds
+ name: eseries_storage_iscsi_target
+ ping: false
+ unnamed_discovery: false
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Make iSCSI target pingable (change, check_mode)
+ na_santricity_iscsi_target:
+ <<: *creds
+ name: eseries_storage_iscsi_target
+ ping: true
+ unnamed_discovery: false
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Make iSCSI target pingable (change)
+ na_santricity_iscsi_target:
+ <<: *creds
+ name: eseries_storage_iscsi_target
+ ping: true
+ unnamed_discovery: false
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Make iSCSI target pingable (no change)
+ na_santricity_iscsi_target:
+ <<: *creds
+ name: eseries_storage_iscsi_target
+ ping: true
+ unnamed_discovery: false
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Make iSCSI target discoverable (change)
+ na_santricity_iscsi_target:
+ <<: *creds
+ name: eseries_storage_iscsi_target
+ ping: true
+ unnamed_discovery: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ldap/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ldap/tasks/main.yml
new file mode 100644
index 000000000..b7b57df11
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_ldap/tasks/main.yml
@@ -0,0 +1,104 @@
+# Test code for the na_santricity_ldap module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- include_vars: "../../integration_config.yml"
+
+- set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ ldap_info: &info
+ bind_user: "{{ bind_user }}"
+ bind_password: "{{ bind_password }}"
+ server_url: "{{ server_url }}"
+ search_base: "{{ search_base }}"
+ role_mappings:
+ - ".*":
+ - storage.admin
+ - security.admin
+ - support.admin
+ - storage.monitor
+ - ".*":
+ - storage.monitor
+
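+# role_mappings is a two-element list: role_mappings[0] grants the full set of roles (storage.admin,
+# security.admin, support.admin, storage.monitor) and role_mappings[1] grants only storage.monitor. The tests
+# apply the broader mapping, re-apply it to prove idempotency, then switch to the narrower mapping to force a
+# change before deleting the domain.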
+- name: Delete default LDAP domain
+ na_santricity_ldap:
+ <<: *creds
+ state: disabled
+
+- name: Delete default LDAP domain
+ na_santricity_ldap:
+ <<: *creds
+ state: disabled
+ register: results
+- name: Verify LDAP changes
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Define a default LDAP domain, utilizing defaults where possible (changed, check_mode)
+ na_santricity_ldap:
+ <<: *creds
+ <<: *info
+ state: present
+ identifier: test1
+ role_mappings: "{{ role_mappings[0] }}"
+ check_mode: true
+ register: results
+- name: Verify LDAP changes
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Define a default LDAP domain, utilizing defaults where possible (changed)
+ na_santricity_ldap:
+ <<: *creds
+ <<: *info
+ state: present
+ identifier: test1
+ role_mappings: "{{ role_mappings[0] }}"
+ register: results
+- name: Verify LDAP changes
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Define a default LDAP domain, utilizing defaults where possible (no change)
+ na_santricity_ldap:
+ <<: *creds
+ <<: *info
+ state: present
+ identifier: test1
+ role_mappings: "{{ role_mappings[0] }}"
+ register: results
+- name: Verify LDAP changes
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Define a default LDAP domain, utilizing defaults where possible (change)
+ na_santricity_ldap:
+ <<: *creds
+ <<: *info
+ state: present
+ identifier: test1
+ role_mappings: "{{ role_mappings[1] }}"
+ register: results
+- name: Verify LDAP changes
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Delete default LDAP domain
+ na_santricity_ldap:
+ <<: *creds
+ state: absent
+ identifier: test1
+ register: results
+- name: Verify LDAP changes
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!" \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_lun_mapping/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_lun_mapping/tasks/main.yml
new file mode 100644
index 000000000..37955fbd2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_lun_mapping/tasks/main.yml
@@ -0,0 +1,318 @@
+# Test code for the na_santricity_lun_mapping module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for the na_santricity_lun_mapping module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+# ****************************************************
+# *** Setup test hosts, storage pools, and volumes ***
+# ****************************************************
+- name: Create host for host mapping
+ na_santricity_host:
+ <<: *creds
+ state: present
+ name: test_host_mapping_host
+ host_type: 27
+- na_santricity_host:
+ <<: *creds
+ state: present
+ name: test_host1
+ host_type: 27
+- na_santricity_host:
+ <<: *creds
+ state: present
+ name: test_host2
+ host_type: 27
+- name: Create storage pool for host mapping
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: test_host_mapping_storage_pool
+ raid_level: raid0
+ criteria_min_usable_capacity: 1
+- name: Create volume for host mapping
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: test_host_mapping_volume
+ storage_pool_name: test_host_mapping_storage_pool
+ size: 1
+- name: Create volume for host mapping
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: test_host_mapping_volume2
+ storage_pool_name: test_host_mapping_storage_pool
+ size: 1
+
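+# Mappings are verified by querying the array graph for the test volume and checking its "mapped" flag. A sketch
+# of the lookup used throughout this file, assuming the credentials fact defined above:
+#
+# - uri:
+#     url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+#     user: "{{ credentials.api_username }}"
+#     password: "{{ credentials.api_password }}"
+#     body_format: json
+#     validate_certs: no
+#   register: current
+# # current.json is a list of matching volume objects, each carrying a boolean "mapped" field.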
+# **********************************************
+# *** Create new lun between host and volume ***
+# **********************************************
+- name: Create na_santricity_lun_mapping
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume
+ register: result
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ item['mapped'] }}"
+ msg: "Lun failed to be created."
+ loop: "{{ lookup('list', current.json)}}"
+
+# QUICK VERIFICATION OF MISMATCHING TARGET/TARGET_TYPE - GOOD
+#- name: Create na_santricity_lun_mapping
+# na_santricity_lun_mapping:
+# <<: *creds
+# state: present
+# target: test_host_mapping_host
+# volume: test_host_mapping_volume
+# lun: 100
+# target_type: group
+# register: result
+#
+#- pause: seconds=30
+# **************************************************************
+# *** Repeat previous lun creation play and verify unchanged ***
+# **************************************************************
+- name: Repeat lun creation
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume
+ register: result
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ item['mapped'] and result.changed==False }}"
+ msg: "Lun failed to be unchanged."
+ loop: "{{ lookup('list', current.json)}}"
+
+# ****************************************************************
+# *** Move existing lun to default target and verify unchanged ***
+# ****************************************************************
+- name: Move lun to default target
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ volume: test_host_mapping_volume
+ register: result
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ item['mapped'] }}"
+ msg: "Lun failed to be created."
+ loop: "{{ lookup('list', current.json)}}"
+
+# *****************************************************************
+# *** Move existing lun to specific target and verify unchanged ***
+# *****************************************************************
+- name: Move lun to default target
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume
+ register: result
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ item['mapped'] }}"
+ msg: "Lun failed to be created."
+ loop: "{{ lookup('list', current.json)}}"
+
+# *******************************************
+# *** Modify a volume mapping's lun value ***
+# *******************************************
+- name: Change volume mapping's lun value
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume
+ lun: 100
+ register: result
+
+- pause: seconds=15
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ result.changed }}"
+ msg: "Lun mapping failed to change."
+ loop: "{{ lookup('list', current.json)}}"
+
+- name: Verify mapping fails when lun already in use on existing host object
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume2
+ lun: 100
+ register: result
+ ignore_errors: True
+
+- pause: seconds=15
+
+- assert:
+ that: "{{ not result.changed }}"
+ msg: "Lun succeeded when it should have failed."
+ loop: "{{ lookup('list', current.json)}}"
+
+- name: Verify mapping succeeds when the same lun is used on multiple host objects.
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: present
+ target: test_host1
+ volume: test_host_mapping_volume2
+ lun: 100
+ register: result
+
+- pause: seconds=15
+
+- assert:
+ that: "{{ result.changed }}"
+ msg: "Lun mapping failed to be changed."
+ loop: "{{ lookup('list', current.json)}}"
+
+# *************************************************************************************************
+# *** Verify that exact mapping details but different lun results in an unchanged configuration ***
+# *************************************************************************************************
+- name: Verify that exact mapping details but different lun results in an unchanged configuration
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: absent
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume
+ lun: 99
+ register: result
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ item['mapped'] and not result.changed }}"
+ msg: "Lun failed to be unchanged."
+ loop: "{{ lookup('list', current.json)}}"
+
+# ********************************
+# *** Delete newly created lun ***
+# ********************************
+- name: Delete lun mapping
+ na_santricity_lun_mapping:
+ <<: *creds
+ state: absent
+ target: test_host_mapping_host
+ volume: test_host_mapping_volume
+ register: result
+
+- name: Verify lun mapping
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/graph/xpath-filter?query=//volume[name='test_host_mapping_volume']"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current
+
+- assert:
+ that: "{{ not item['mapped'] }}"
+ msg: "Lun mapping failed to be removed."
+ loop: "{{ lookup('list', current.json)}}"
+
+# ********************************************************
+# *** Tear down test hosts, storage pools, and volumes ***
+# ********************************************************
+- name: Delete volume for host mapping
+ na_santricity_volume:
+ <<: *creds
+ state: absent
+ name: test_host_mapping_volume
+ storage_pool_name: test_host_mapping_storage_pool
+ size: 1
+- name: Delete volume for host mapping
+ na_santricity_volume:
+ <<: *creds
+ state: absent
+ name: test_host_mapping_volume2
+ storage_pool_name: test_host_mapping_storage_pool
+ size: 1
+- name: Delete storage pool for host mapping
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: test_host_mapping_storage_pool
+ raid_level: raid0
+ criteria_min_usable_capacity: 1
+- name: Delete host for host mapping
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: test_host_mapping_host
+ host_type_index: 27
+- name: Delete host for host mapping
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: test_host2
+ host_type_index: 27
+- name: Delete host for host mapping
+ na_santricity_host:
+ <<: *creds
+ state: absent
+ name: test_host1
+ host_type_index: 27 \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_mgmt_interface/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_mgmt_interface/tasks/main.yml
new file mode 100644
index 000000000..15aebf4f9
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_mgmt_interface/tasks/main.yml
@@ -0,0 +1,383 @@
+# Test code for the na_santricity_mgmt_interface module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# Note: Ensure controller B has IPv6 enabled, otherwise the first task will fail.
+
+- include_vars: "../../integration_config.yml"
+
+- set_fact:
+ controller_a: '070000000000000000000001'
+ controller_b: '070000000000000000000002'
+ original_channel_a1_info: &channel_a1_info
+ state: enabled
+ address: 10.113.1.192
+ subnet_mask: 255.255.255.0
+ gateway: 10.113.1.1
+ config_method: static
+ dns_config_method: static
+ dns_address: 10.193.0.250
+ dns_address_backup: 10.192.0.250
+ ntp_config_method: static
+ ntp_address: 216.239.35.0
+ ntp_address_backup: 216.239.35.4
+ ssh: true
+ original_channel_b1_info: &channel_b1_info
+ state: enabled
+ address: 10.113.1.193
+ subnet_mask: 255.255.255.0
+ gateway: 10.113.1.1
+ config_method: static
+ dns_config_method: static
+ dns_address: 10.193.0.250
+ dns_address_backup: 10.192.0.250
+ ntp_config_method: static
+ ntp_address: 216.239.35.0
+ ntp_address_backup: 216.239.35.4
+ ssh: true
+ address_info_list: &test_info
+ address: 10.113.1.251
+ subnet_mask: 255.255.255.0
+ gateway: 10.113.1.1
+
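+# The channel_a1_info and channel_b1_info anchors above capture each controller's original static
+# settings so the final "Restore ..." tasks can merge them back in via "<<:"; test_info holds the
+# temporary static address used while exercising the module.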
+- name: Set controller A port 1 to dhcp
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ controller: A
+ port: "1"
+ config_method: dhcp
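+# The next two tasks reach the array through controller B's management address (controller A's
+# address is swapped out of base_url) because controller A port 1 may have been given a new
+# address by dhcp.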
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url.replace(original_channel_a1_info['address'], original_channel_b1_info['address']) }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller A port 1 is set to dhcp
+ assert:
+ that: "{{ (item['controllerRef'] != controller_a or item['channel'] != 1) or item['ipv4AddressConfigMethod'] == 'configDhcp' }}"
+ msg: "Failed to set controller A port 1 to dhcp!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: Restore controller A port 1 to static
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url.replace(original_channel_a1_info['address'], original_channel_b1_info['address']) }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ controller: A
+ port: "1"
+ <<: *channel_a1_info
+
+- name: Disable controller B port 1
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "disabled"
+ port: "1"
+ controller: B
+
+- name: Set controller B port 1 to dhcp
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: dhcp
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 is set to dhcp
+ assert:
+ that: "{{ (item['controllerRef'] != controller_b or item['channel'] != 1) or item['ipv4AddressConfigMethod'] == 'configDhcp' }}"
+ msg: "Failed to set controller B port 1 to dhcp!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
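+# check_mode reports what would change without applying it, so the validation below expects
+# result['changed'] to be true while the interface is still reported as configDhcp.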
+- name: Set controller B port 1 to static ip address (changed, check_mode)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ check_mode: true
+ register: result
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 change was reported but not applied (check_mode)
+ assert:
+ that: "{{ result['changed'] and
+ ((item['controllerRef'] != controller_b or item['channel'] != 1) or
+ item['ipv4AddressConfigMethod'] == 'configDhcp') }}"
+ msg: "Check_mode failed to report a change or controller B port 1 was modified!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: Set controller B port 1 to static ip address (changed)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ register: result
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 is set to static ip address
+ assert:
+ that: "{{ result['changed'] and
+ ((item['controllerRef'] != controller_b or item['channel'] != 1) or
+ (item['ipv4AddressConfigMethod'] == 'configStatic' and
+ item['ipv4Address'] == address_info_list['address'] and
+ item['ipv4SubnetMask'] == address_info_list['subnet_mask'] and
+ item['ipv4GatewayAddress'] == address_info_list['gateway'])) }}"
+ msg: "Failed to set controller B port 1 to static ip address!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: set controller B port 1 dns setting to dhcp
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ dns_config_method: dhcp
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 dns is set to dhcp
+ assert:
+ that: "{{ ((item['controllerRef'] != controller_b or item['channel'] != 1) or
+ item['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'] == 'dhcp') }}"
+ msg: "Failed to set controller B port 1 dns setting to dhcp!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: set controller B port 1 dns setting to static (changed)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ dns_config_method: static
+ dns_address: 192.168.1.1
+ dns_address_backup: 192.168.1.2
+ register: result
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 dns is set to static
+ assert:
+ that: "{{ result['changed'] and
+ ((item['controllerRef'] != controller_b or item['channel'] != 1) or
+ (item['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'] == 'stat') and
+ item['dnsProperties']['acquisitionProperties']['dnsServers'][0]['addressType'] == 'ipv4' and
+ item['dnsProperties']['acquisitionProperties']['dnsServers'][0]['ipv4Address'] == '192.168.1.1' and
+ item['dnsProperties']['acquisitionProperties']['dnsServers'][1]['addressType'] == 'ipv4' and
+ item['dnsProperties']['acquisitionProperties']['dnsServers'][1]['ipv4Address'] == '192.168.1.2') }}"
+ msg: "Failed to set controller B port 1 dns setting to static!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: disable controller B port 1 ntp settings (changed)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ ntp_config_method: disabled
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 ntp is disabled
+ assert:
+ that: "{{ (item['controllerRef'] != controller_b or item['channel'] != 1) or
+ item['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'] == 'disabled' }}"
+ msg: "Failed to disable controller B port 1 ntp!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: set controller B port 1 ntp setting to dhcp (changed)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ ntp_config_method: dhcp
+ register: result
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 ntp is set to dhcp
+ assert:
+ that: "{{ result['changed'] and
+ ((item['controllerRef'] != controller_b or item['channel'] != 1) or
+ item['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'] == 'dhcp') }}"
+ msg: "Failed to set controller B port 1 ntp setting to dhcp!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: set controller B port 1 ntp setting to static (changed)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ state: "enabled"
+ port: "1"
+ controller: B
+ config_method: static
+ <<: *test_info
+ ntp_config_method: static
+ ntp_address: 192.168.1.1
+ ntp_address_backup: 192.168.1.2
+ register: result
+- name: Retrieve the current management interfaces
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/configuration/ethernet-interfaces"
+ register: interfaces
+- name: Validate controller B port 1 ntp is set to static
+ assert:
+ that: "{{ result['changed'] and
+ ((item['controllerRef'] != controller_b or item['channel'] != 1) or
+ (item['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'] == 'stat') and
+ item['ntpProperties']['acquisitionProperties']['ntpServers'][0]['addrType'] == 'ipvx' and
+ item['ntpProperties']['acquisitionProperties']['ntpServers'][0]['ipvxAddress']['addressType'] == 'ipv4' and
+ item['ntpProperties']['acquisitionProperties']['ntpServers'][0]['ipvxAddress']['ipv4Address'] == '192.168.1.1' and
+ item['ntpProperties']['acquisitionProperties']['ntpServers'][1]['addrType'] == 'ipvx' and
+ item['ntpProperties']['acquisitionProperties']['ntpServers'][1]['ipvxAddress']['addressType'] == 'ipv4' and
+ item['ntpProperties']['acquisitionProperties']['ntpServers'][1]['ipvxAddress']['ipv4Address'] == '192.168.1.2') }}"
+ msg: "Failed to set controller B port 1 ntp setting to static!"
+ loop: "{{ lookup('list', interfaces['json']) }}"
+
+- name: disable controller B ssh
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ port: "1"
+ controller: B
+ ssh: false
+- name: Retrieve the current controller settings
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/controllers"
+ register: controllers
+- name: Validate controller B ssh is disabled
+ assert:
+ that: "{{ item['controllerRef'] != controller_b or not item['networkSettings']['remoteAccessEnabled'] }}"
+ msg: "Failed to disable controller B ssh!"
+ loop: "{{ lookup('list', controllers['json']) }}"
+
+- name: enable controller B ssh (changed)
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ port: "1"
+ controller: B
+ ssh: true
+ register: result
+- name: Retrieve the current controller settings
+ uri:
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ url: "{{ base_url }}storage-systems/{{ ssid }}/controllers"
+ register: controllers
+- name: Validate controller B ssh is enabled
+ assert:
+ that: "{{ result['changed'] and (item['controllerRef'] != controller_b or item['networkSettings']['remoteAccessEnabled']) }}"
+ msg: "Failed to enable controller B ssh!"
+ loop: "{{ lookup('list', controllers['json']) }}"
+
+- name: Restore controller B port 1 settings
+ netapp_eseries.santricity.na_santricity_mgmt_interface:
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+ port: "1"
+ controller: B
+ <<: *channel_b1_info
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/ib.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/ib.yml
new file mode 100644
index 000000000..260f3d7ff
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/ib.yml
@@ -0,0 +1,88 @@
+# Test code for the na_santricity_nvme_interface module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for na_santricity_nvme_interface module test
+ set_fact:
+ credentials: &creds
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2/
+ api_username: admin
+ api_password: adminpassword
+ validate_certs: false
+ interface_a1_ip: 192.168.1.1
+ interface_b1_ip: 192.168.2.1
+
+- name: Set the initial nvme interfaces
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ loop:
+ - ["A", "1", "{{ interface_a1_ip }}"]
+ - ["B", "1", "{{ interface_b1_ip }}"]
+
+- name: Repeat the initial nvme interfaces (no change)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", "{{ interface_a1_ip }}"]
+ - ["B", "1", "{{ interface_b1_ip }}"]
+- name: Verify no changes were made
+ assert:
+ that: "{{ not item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}"
+
+- name: Change the initial nvme interfaces (changed, check_mode)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", "192.168.3.230"]
+ - ["B", "1", "192.168.3.231"]
+ check_mode: true
+- name: Verify changes were detected (check_mode)
+ assert:
+ that: "{{ item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}"
+
+- name: Change the initial nvme interfaces (changed)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", "192.168.3.230"]
+ - ["B", "1", "192.168.3.231"]
+- name: Verify changes were made
+ assert:
+ that: "{{ item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}"
+
+- name: Revert to the initial nvme interfaces (changed)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: "{{ item[0] }}"
+ channel: "{{ item[1] }}"
+ address: "{{ item[2] }}"
+ register: results
+ loop:
+ - ["A", "1", "{{ interface_a1_ip }}"]
+ - ["B", "1", "{{ interface_b1_ip }}"]
+- name: Verify changes were made
+ assert:
+ that: "{{ item['changed'] }}"
+ msg: "Unexpected results!"
+ loop: "{{ lookup('list', results['results']) }}" \ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/main.yml
new file mode 100644
index 000000000..82f5ba168
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/main.yml
@@ -0,0 +1,2 @@
+- include_tasks: ib.yml
+- include_tasks: roce.yml
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/roce.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/roce.yml
new file mode 100644
index 000000000..70bfe55d4
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_nvme_interface/tasks/roce.yml
@@ -0,0 +1,105 @@
+# Test code for the na_santricity_nvme_interface module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for na_santricity_nvme_interface module test
+ set_fact:
+ credentials: &creds
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2/
+ api_username: admin
+ api_password: adminpassword
+ validate_certs: false
+ original_interface: &iface
+ address: 192.168.131.101
+ subnet_mask: 255.255.255.0
+ gateway: 0.0.0.0
+
+- name: Ensure NVMeoF interfaces are properly configured.
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: A
+ channel: 1
+ config_method: dhcp
+ mtu: 9000
+ speed: 25
+
+- name: Ensure NVMeoF interfaces are properly configured (no change).
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: A
+ channel: 1
+ config_method: dhcp
+ mtu: 9000
+ speed: 25
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Ensure NVMe interfaces are properly configured. (change, check_mode)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: A
+ channel: 1
+ config_method: static
+ address: 192.168.130.200
+ subnet_mask: 255.255.254.0
+ gateway: 192.168.130.1
+ mtu: 1500
+ speed: auto
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Ensure NVMe interfaces are properly configured. (change)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: A
+ channel: 1
+ config_method: static
+ address: 192.168.130.200
+ subnet_mask: 255.255.254.0
+ gateway: 192.168.130.1
+ mtu: 1500
+ speed: auto
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Ensure NVMe interfaces are properly configured. (no change)
+ na_santricity_nvme_interface:
+ <<: *creds
+ controller: A
+ channel: 1
+ config_method: static
+ address: 192.168.130.200
+ subnet_mask: 255.255.254.0
+ gateway: 192.168.130.1
+ mtu: 1500
+ speed: auto
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Ensure NVMeoF interfaces are properly configured. (change)
+ na_santricity_nvme_interface:
+ <<: *creds
+ <<: *iface
+ controller: A
+ channel: 1
+ config_method: static
+ mtu: 1500
+ speed: auto
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_drive_firmware_upload/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_drive_firmware_upload/tasks/main.yml
new file mode 100644
index 000000000..c261abffa
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_drive_firmware_upload/tasks/main.yml
@@ -0,0 +1,65 @@
+# Test code for the na_santricity_proxy_drive_firmware_upload module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Test na_santricity_proxy_drive_firmware_upload module
+ set_fact:
+ credentials: &creds
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ firmware:
+ - /home/swartzn/Downloads/drive firmware/D_PX04SVQ160_30603182_MS00_5600_001.dlp
+ - /home/swartzn/Downloads/drive firmware/D_PX04SVQ160_30603299_MSB6_224C_705.dlp
+
+- name: Clear any existing proxy drive firmware
+ na_santricity_proxy_drive_firmware_upload:
+ <<: *creds
+
+- name: Clear any existing proxy drive firmware (no change)
+ na_santricity_proxy_drive_firmware_upload:
+ <<: *creds
+ register: results
+- name: Verify all drive firmware has been removed
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Drive firmware exists!"
+
+- name: Add drive firmware to proxy (changed, check_mode)
+ na_santricity_proxy_drive_firmware_upload:
+ <<: *creds
+ firmware: "{{ firmware }}"
+ register: results
+ check_mode: true
+- name: Verify drive firmware has been added
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Drive firmware failed to be added!"
+
+- name: Add drive firmware to proxy (changed)
+ na_santricity_proxy_drive_firmware_upload:
+ <<: *creds
+ firmware: "{{ firmware }}"
+ register: results
+- name: Verify drive firmware has been added
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Drive firmware failed to be added!"
+
+- name: Remove drive firmware from proxy (changed)
+ na_santricity_proxy_drive_firmware_upload:
+ <<: *creds
+ register: results
+- name: Verify drive firmware has been removed
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Drive firmware failed to be removed!"
+
+- name: Remove drive firmware from proxy (no change)
+ na_santricity_proxy_drive_firmware_upload:
+ <<: *creds
+ register: results
+- name: Verify drive firmware has been removed
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Drive firmware exists!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_firmware_upload/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_firmware_upload/tasks/main.yml
new file mode 100644
index 000000000..d4b9f02dc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_firmware_upload/tasks/main.yml
@@ -0,0 +1,65 @@
+# Test code for the na_santricity_proxy_firmware_upload module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Test na_santricity_proxy_firmware_upload module
+ set_fact:
+ credentials: &creds
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ firmware:
+ - /home/swartzn/Downloads/N5600-840834-D03.dlp
+ - /home/swartzn/Downloads/RC_08405000_m3_e10_840_5600.dlp
+
+- name: Clear any existing proxy firmware
+ na_santricity_proxy_firmware_upload:
+ <<: *creds
+
+- name: Clear any existing proxy firmware (no change)
+ na_santricity_proxy_firmware_upload:
+ <<: *creds
+ register: results
+- name: Verify all firmware has been removed
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Firmware exists!"
+
+- name: Add firmware to proxy (changed, check_mode)
+ na_santricity_proxy_firmware_upload:
+ <<: *creds
+ firmware: "{{ firmware }}"
+ register: results
+ check_mode: true
+- name: Verify firmware has been added
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Firmware failed to be added!"
+
+- name: Add firmware to proxy (changed)
+ na_santricity_proxy_firmware_upload:
+ <<: *creds
+ firmware: "{{ firmware }}"
+ register: results
+- name: Verify firmware has been added
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Firmware failed to be added!"
+
+- name: Remove firmware from proxy (changed)
+ na_santricity_proxy_firmware_upload:
+ <<: *creds
+ register: results
+- name: Verify firmware has been removed
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Firmware failed to be removed!"
+
+- name: Remove firmware from proxy (no change)
+ na_santricity_proxy_firmware_upload:
+ <<: *creds
+ register: results
+- name: Verify firmware has been removed
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Firmware exists!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_systems/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_systems/tasks/main.yml
new file mode 100644
index 000000000..1475cda99
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_proxy_systems/tasks/main.yml
@@ -0,0 +1,160 @@
+# Test code for the na_santricity_proxy_systems module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+
+# NOTE: Running this test back-to-back can result in a 10 minute lock-out
+
+- name: Test na_santricity_proxy_systems module
+ set_fact:
+ credentials: &creds
+ api_url: "{{ proxy_base_url }}"
+ api_username: "{{ proxy_username }}"
+ api_password: "{{ proxy_password }}"
+ validate_certs: "{{ proxy_validate_cert }}"
+ subnet: 192.168.1.10/24
+ small_subnet: 192.168.1.10/31 # Be sure to know the systems included in this subnet since they will be discovered and not specified.
+ systems:
+ - ssid: "10"
+ serial: "021633035190"
+ password: "password"
+ - ssid: "20"
+ serial: "711214000794"
+ password: "password"
+
+- name: Ensure no systems have been added.
+ na_santricity_proxy_systems:
+ <<: *creds
+
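+# The inline Jinja2 in the next few tasks builds a list of {"serial": ...} entries from the
+# systems variable so that a single shared password can be supplied through the module's
+# password parameter instead of per-system passwords.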
+- name: Add multiple systems using serial numbers and a common password (change, check_mode)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ subnet }}"
+ password: "{{ systems[0]['password'] }}"
+ systems: |-
+ {%- set output=[] %}
+ {%- for system in systems %}
+ {%- if output.append({"serial": system["serial"]}) %}{%- endif %}
+ {%- endfor %}
+ {{ output }}
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Add multiple systems using serial numbers and a common password (change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ subnet }}"
+ password: "{{ systems[0]['password'] }}"
+ systems: |-
+ {%- set output=[] %}
+ {%- for system in systems %}
+ {%- if output.append({"serial": system["serial"]}) %}{%- endif %}
+ {%- endfor %}
+ {{ output }}
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Add multiple systems using serial numbers and a common password (no change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ subnet }}"
+ password: "{{ systems[0]['password'] }}"
+ systems: |-
+ {%- set output=[] %}
+ {%- for system in systems %}
+ {%- if output.append({"serial": system["serial"]}) %}{%- endif %}
+ {%- endfor %}
+ {{ output }}
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Remove all systems. (change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add multiple systems using serial numbers (change, check_mode)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ subnet }}"
+ systems: "{{ systems }}"
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add multiple systems using serial numbers (change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ subnet }}"
+ systems: "{{ systems }}"
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add multiple systems using serial numbers (no change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ subnet }}"
+ systems: "{{ systems }}"
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Remove all systems. (change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add any other available system on the subnet (change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ subnet_mask: "{{ small_subnet }}"
+ add_discovered_systems: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Remove all systems. (change, check_mode)
+ na_santricity_proxy_systems:
+ <<: *creds
+ register: results
+ check_mode: true
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Remove all systems. (change)
+ na_santricity_proxy_systems:
+ <<: *creds
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_storagepool/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_storagepool/tasks/main.yml
new file mode 100644
index 000000000..664df5951
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_storagepool/tasks/main.yml
@@ -0,0 +1,1038 @@
+# Test code for the na_santricity_storagepool module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+#
+# Raid levels tested: raid0, raid1, raid5, raid6, disk pool
+# Actions covered: create w/capacity, create w/drive count, repeat create (no changes), extend w/capacity,
+# extend w/drive count, delete, migrate raid levels (raid0->raid6, 1->5, 5->1, 6->0),
+# secure pool for raid0, erasing drives on creation, erasing drives on deletion,
+# setting reserve drive count for ddp,
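+#
+# The capacity assertions below compare totalRaidedSpace (bytes) against the requested minimum
+# usable capacity converted with binary units, e.g. 1400 gb = 1400 * 2**30 = 1503238553600 bytes
+# and 3.4 tb = 3.4 * 2**40 ~= 3738339534438 bytes.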
+
+- name: Set facts for na_santricity_storagepool module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+# Ensure that test starts without storage pools
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ erase_secured_drives: yes
+ name: "{{ item }}"
+ loop:
+ - raid0_storage
+ - raid1_storage
+ - raid5_storage
+ - raid6_storage
+ - raidDiskPool_storage
+
+# Raid0
+# Create, rerun, extend, and modify raid level.
+- name: Create simple storage pool using raid0.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_min_usable_capacity: 1400
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 1503238553600 }}"
+ msg: "raid0 storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid0_storage`]') }}"
+
+- name: (Repeat) Create simple storage pool using raid0.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_min_usable_capacity: 1400
+ criteria_size_unit: gb
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ not results.changed and item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 1503238553600 }}"
+ msg: "raid0 storage pool was modified when it should not have been."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid0_storage`]') }}"
+
+- name: Extend storage pool to 2400gb minimum usable capacity.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_min_usable_capacity: 2400
+ criteria_size_unit: gb
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 2576980377600 }}"
+ msg: "raid0 storage pool failed to be extended to a minimum of 2400gb."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid0_storage`]') }}"
+
+- name: Expand simple storage pool using raid0.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_drive_count: 6
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was expanded
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid0' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid0 storage pool failed to be extended to 6 drives."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Migrate raid0 storage pool to raid6.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_drive_count: 6
+ raid_level: raid6
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was migrated
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid6' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid0 storage pool failed to migrate to raid6"
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - raid0_storage
+
+
+# Raid1
+# Create, rerun, extend, and modify raid level.
+- name: Create simple storage pool using raid1.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid1_storage
+ criteria_min_usable_capacity: 1400
+ criteria_size_unit: gb
+ raid_level: raid1
+ register: results
+- pause: seconds=5
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid1' and (item.totalRaidedSpace | int) >= 1503238553600 }}"
+ msg: "raid1 storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid1_storage`]') }}"
+
+- name: (Repeat) Create simple storage pool using raid1.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid1_storage
+ criteria_min_usable_capacity: 1400
+ criteria_size_unit: gb
+ raid_level: raid1
+ register: results
+- pause: seconds=5
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ not results.changed and item.raidLevel == 'raid1' and (item.totalRaidedSpace | int) >= 1503238553600 }}"
+ msg: "raid1 storage pool was modified when it should not have been."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid1_storage`]') }}"
+
+- name: Expand simple storage pool using raid1.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid1_storage
+ criteria_drive_count: 6
+ raid_level: raid1
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was expanded
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid1' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid1 storage pool failed to be extended."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Migrate raid1 storage pool to raid5
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid1_storage
+ criteria_drive_count: 6
+ raid_level: raid5
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was migrated
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid5' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid1 storage pool failed to migrate to raid5."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - raid1_storage
+
+
+# Raid5
+# Create, rerun, extend, and modify raid level.
+- name: Create simple storage pool using raid5.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid5_storage
+ criteria_drive_count: 6
+ raid_level: raid5
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid5' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid5 storage pool failed to be created."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: (Rerun) Create simple storage pool using raid5.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid5_storage
+ criteria_drive_count: 6
+ raid_level: raid5
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ not results.changed and results.raidLevel == 'raid5' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid5 storage pool was modified when it should not have been."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Expand simple storage pool using raid5.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid5_storage
+ criteria_drive_count: 8
+ raid_level: raid5
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was expanded
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid5' and
+ (current_drives.json | json_query(count_query) | length) == 8}}"
+ msg: "raid5 storage pool failed to be modified to 8 drives."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Migrate raid5 storage pool to raid1
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid5_storage
+ criteria_drive_count: 8
+ raid_level: raid1
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was migrated
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid1' and
+ (current_drives.json | json_query(count_query) | length) == 8}}"
+ msg: "raid5 storage pool failed to migrate to raid1."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - raid5_storage
+
+
+# raid6
+# Create, rerun, extend, and modify raid level.
+- name: Create simple storage pool using raid6.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid6_storage
+ criteria_drive_count: 5
+ raid_level: raid6
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid6' and
+ (current_drives.json | json_query(count_query) | length) == 5}}"
+ msg: "raid6 storage pool failed to be created with 5 drives."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Extend simple storage pool using raid6.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid6_storage
+ criteria_min_usable_capacity: 3.4
+ criteria_size_unit: tb
+ raid_level: raid6
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid6' and (item.totalRaidedSpace | int) >= 3738339534438 }}"
+ msg: "raid6 storage pool failed to be extended to a minimum of 3.4tb."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid6_storage`]') }}"
+
+- name: Migrate raid6 storage pool to raid0
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid6_storage
+ criteria_min_usable_capacity: 3.4
+ criteria_size_unit: tb
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was migrated
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 3738339534438 }}"
+ msg: "raid6 storage pool failed to migrate to raid0."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid6_storage`]') }}"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - raid6_storage
+
+# raidDiskPool
+# Create, rerun, extend, and modify raid level.
+- name: Create simple storage pool using raidDiskPool.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 2300
+ criteria_size_unit: gb
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and (item.totalRaidedSpace | int) >= 2469606195200 }}"
+ msg: "Simple storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Rerun simple storage pool creation.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 2300
+ criteria_size_unit: gb
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was not modified
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ not results.changed and item.raidLevel == 'raidDiskPool' and (item.totalRaidedSpace | int) >= 2469606195200 }}"
+ msg: "Simple storage pool was modified when it should not have been."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Extend simple storage pool to a minimum usable capacity of 3000gb
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 3000
+ criteria_size_unit: gb
+ raid_level: raidDiskPool
+ register: results
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and (item.totalRaidedSpace | int) >= 3221225472000 }}"
+ msg: "Simple storage pool failed to be extended."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Extend simple storage pool.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_drive_count: 12
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raidDiskPool' and
+ (current_drives.json | json_query(count_query) | length) == 12}}"
+ msg: "raidDiskPool storage pool failed to be extended with 12 drives."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: raidDiskPool_storage
+ register: results
+
+
+# raid0 secured
+- name: Create simple storage pool using raid0.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_min_usable_capacity: 1400
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 1503238553600 and
+ item.securityType == 'enabled' }}"
+ msg: "raid0 storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid0_storage`]') }}"
+
+- name: (Repeat) Create simple storage pool using raid0.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_min_usable_capacity: 1400
+ criteria_size_unit: gb
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was not modified
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ not results.changed and item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 1503238553600 and
+ item.securityType == 'enabled' }}"
+ msg: "raid0 storage pool failed not to be modified."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid0_storage`]') }}"
+
+- name: Extend storage pool to 2400gb minimum usable capacity.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_min_usable_capacity: 2400
+ criteria_size_unit: gb
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raid0' and (item.totalRaidedSpace | int) >= 2576980377600 and
+ item.securityType == 'enabled' }}"
+ msg: "raid0 storage pool using raid0 failed to be extended to a minimum of 2400gb."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raid0_storage`]') }}"
+
+- name: Expand simple storage pool using raid0.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_drive_count: 6
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raid0
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was expanded
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid0' and results.securityType == 'enabled' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid0 storage pool failed to be extended to 6 drives."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Migrate raid0 storage pool to raid6.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raid0_storage
+ criteria_drive_count: 6
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raid6
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was migrated
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raid6' and results.securityType == 'enabled' and
+ (current_drives.json | json_query(count_query) | length) == 6 }}"
+ msg: "raid0 storage pool failed to migrate to raid6"
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ erase_secured_drives: yes
+ loop:
+ - raid0_storage
+
+
+# raidDiskPool secured
+- name: Create simple storage pool using raidDiskPool.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 2300
+ criteria_size_unit: gb
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and (item.totalRaidedSpace | int) >= 2469606195200 and
+ item.securityType == 'enabled' }}"
+ msg: "Simple storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Rerun simple storage pool creation.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 2300
+ criteria_size_unit: gb
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was not modified
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ not results.changed and item.raidLevel == 'raidDiskPool' and (item.totalRaidedSpace | int) >= 2469606195200 and
+ item.securityType == 'enabled' }}"
+ msg: "Simple storage pool failed not to be modified."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Extend simple storage pool to a minimum usable capacity of 3000gb
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 3000
+ criteria_size_unit: gb
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and (item.totalRaidedSpace | int) >= 3221225472000 and
+ item.securityType == 'enabled' }}"
+ msg: "Simple storage pool failed to be extended."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Extend simple storage pool to 12 drives.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_drive_count: 12
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was extended
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/drives"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_drives
+- assert:
+ that: "{{ results.raidLevel == 'raidDiskPool' and results.securityType == 'enabled' and
+ (current_drives.json | json_query(count_query) | length) == 12 }}"
+ msg: "raidDiskPool storage pool failed to be extended with 12 drives."
+ vars:
+ count_query: "[?currentVolumeGroupRef=='{{ results.volumeGroupRef }}'].currentVolumeGroupRef"
+
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: raidDiskPool_storage
+ register: results
+
+
+# raidDiskPool set reserve drive count
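+# reserve_drive_count is surfaced by the API as volumeGroupData.diskPoolData.reconstructionReservedDriveCount,
+# which the asserts below read back from the storage-pools endpoint.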
+- name: Create simple storage pool using raidDiskPool.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_drive_count: 11
+ reserve_drive_count: 1
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and
+ item.volumeGroupData.diskPoolData.reconstructionReservedDriveCount == 1 and
+ item.securityType == 'enabled' }}"
+ msg: "Simple storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Change disk pool reserve drive count.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_drive_count: 12
+ reserve_drive_count: 2
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=30
+- name: Verify reserve drive count was updated
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and
+ item.volumeGroupData.diskPoolData.reconstructionReservedDriveCount == 2 and
+ item.securityType == 'enabled' }}"
+ msg: "Simple storage pool failed not to be modified."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+# erase drives on storage pool deletion
+- name: Remove simple storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: raidDiskPool_storage
+ erase_secured_drives: yes
+ register: results
+
+- name: Create simple storage pool using raidDiskPool with capacity and reserve count specified.
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: raidDiskPool_storage
+ criteria_min_usable_capacity: 8000
+ criteria_size_unit: gb
+ reserve_drive_count: 2
+ secure_pool: yes
+ erase_secured_drives: yes
+ raid_level: raidDiskPool
+ register: results
+- pause: seconds=5
+- name: Verify storage pool was created
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/storage-pools"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ body_format: json
+ validate_certs: no
+ register: current_storage_pools
+- assert:
+ that: "{{ item.raidLevel == 'raidDiskPool' and
+ (item.totalRaidedSpace | int) >= 3221225472000 and
+ item.volumeGroupData.diskPoolData.reconstructionReservedDriveCount == 2 and
+ item.securityType == 'enabled' }}"
+ msg: "Simple storage pool failed to be created."
+ loop: "{{ lookup('list', storage_pools, wantList=True) }}"
+ vars:
+ storage_pools: "{{ current_storage_pools | json_query('json[?name==`raidDiskPool_storage`]') }}"
+
+- name: Integration cleanup
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: raidDiskPool_storage
+ erase_secured_drives: yes
+ register: results
+- na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: raidDiskPool_storage
+ register: results
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_syslog/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_syslog/tasks/main.yml
new file mode 100644
index 000000000..79830c3d6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_syslog/tasks/main.yml
@@ -0,0 +1,127 @@
+# Test code for the na_santricity_syslog module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for na_santricity_syslog module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
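+# The &creds anchor lets every task below reuse these connection arguments via the '<<: *creds' merge key.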
+
+- name: Add initial syslog server settings (changed)
+ na_santricity_syslog:
+ <<: *creds
+ address: 192.168.1.100
+ port: 514
+ protocol: udp
+ components: ["auditLog"]
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Configure initial syslog server settings (no change)
+ na_santricity_syslog:
+ <<: *creds
+ address: 192.168.1.100
+ port: 514
+ protocol: udp
+ components: ["auditLog"]
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ not results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add another syslog server entry with a different protocol (changed)
+ na_santricity_syslog:
+ <<: *creds
+ address: 192.168.1.100
+ port: 514
+ protocol: tcp
+ components: ["auditLog"]
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add another syslog server entry with a different port (changed)
+ na_santricity_syslog:
+ <<: *creds
+ address: 192.168.1.100
+ port: 123
+ protocol: tcp
+ components: ["auditLog"]
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Add another syslog server address (change, check_mode)
+ na_santricity_syslog:
+ <<: *creds
+ address: 192.168.1.200
+ port: 514
+ protocol: tcp
+ components: ["auditLog"]
+ check_mode: true
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: (Repeat) Add another syslog server address (change)
+ na_santricity_syslog:
+ <<: *creds
+ address: 192.168.1.200
+ port: 514
+ protocol: tcp
+ components: ["auditLog"]
+ register: results
+- name: Verify results
+ assert:
+ that: "{{ results['changed'] }}"
+ msg: "Unexpected results!"
+
+- name: Disable syslog server (change)
+ na_santricity_syslog:
+ <<: *creds
+ state: absent
+ address: 192.168.1.100
+ port: 514
+ protocol: udp
+ components: ["auditLog"]
+
+- name: Disable syslog server (change)
+ na_santricity_syslog:
+ <<: *creds
+ state: absent
+ address: 192.168.1.100
+ port: 514
+ protocol: tcp
+ components: ["auditLog"]
+
+- name: Disable syslog server (change)
+ na_santricity_syslog:
+ <<: *creds
+ state: absent
+ address: 192.168.1.100
+ port: 123
+ protocol: tcp
+ components: ["auditLog"]
+
+- name: Disable syslog server (change)
+ na_santricity_syslog:
+ <<: *creds
+ state: absent
+ address: 192.168.1.200
+ port: 514
+ protocol: tcp
+ components: ["auditLog"]
diff --git a/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_volume/tasks/main.yml b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_volume/tasks/main.yml
new file mode 100644
index 000000000..fe6d91d35
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/integration/targets/na_santricity_volume/tasks/main.yml
@@ -0,0 +1,768 @@
+# Test code for the na_santricity_volume module
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+- name: Set facts for na_santricity_volume module's integration test.
+ set_fact:
+ credentials: &creds
+ ssid: "{{ ssid }}"
+ api_url: "{{ base_url }}"
+ api_username: "{{ username }}"
+ api_password: "{{ password }}"
+ validate_certs: "{{ validate_cert }}"
+
+# test setup
+- name: Delete any existing storage pools
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - storage_pool
+ - storage_pool2
+ - storage_pool3
+
+# Thick volume testing: create, delete, expand, change properties (read/write cache), expand and change properties together.
+- name: Create raid 0 storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: storage_pool
+ criteria_min_usable_capacity: 5
+ criteria_size_unit: tb
+ erase_secured_drives: yes
+ raid_level: raid0
+
+- name: Delete volume in raid 0 storage pool
+ na_santricity_volume:
+ <<: *creds
+ state: absent
+ name: volume
+
+- name: Create volume in raid 0 storage pool
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 100
+ size_unit: gb
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
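+# Expected values are in bytes: 100gb -> 100 * 1073741824 = 107374182400; segmentSize 131072 (128KiB) is the
+# default expected here since no segment_size was specified.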
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '107374182400' and item.segmentSize == 131072}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- name: Re-execute volume creation in raid 0 storage pool
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 100
+ size_unit: gb
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ not results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '107374182400' and item.segmentSize == 131072}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- name: Update volume size
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 200
+ size_unit: gb
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '214748364800' and item.segmentSize == 131072}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- pause: seconds=15
+
+- name: Update volume properties
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 200
+ size_unit: gb
+ write_cache_enable: true
+ read_cache_enable: false
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '214748364800' and item.segmentSize == 131072 and
+ not item.cacheSettings.readCacheEnable and item.cacheSettings.writeCacheEnable}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- name: Update volume properties and expand storage capabilities
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ write_cache_enable: false
+ read_cache_enable: true
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '322122547200' and item.segmentSize == 131072 and
+ item.cacheSettings.readCacheEnable and not item.cacheSettings.writeCacheEnable}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+# Workload tagging testing: create, utilize existing (name only, name with same attributes), modify attributes
+- name: Add workload tag (change, new workload tag)
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ write_cache_enable: false
+ read_cache_enable: true
+ workload_name: volume_tag
+ metadata:
+ volume_tag_key: volume_tag_value
+ register: results
+- pause: seconds=15
+- name: Validate volume workload changes
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '322122547200' and item.segmentSize == 131072 and
+ item.cacheSettings.readCacheEnable and not item.cacheSettings.writeCacheEnable and
+ {'key': 'volumeTypeId', 'value': 'volume'} in item.metadata }}"
+ msg: "Failed to modify volume metadata!"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: workload_tags
+- assert:
+ that: "{{ item.name == 'volume_tag' and
+ {'key': 'volume_tag_key', 'value': 'volume_tag_value'} in item.workloadAttributes }}"
+ msg: "Workload tag failed to be created!"
+ loop: "{{ lookup('list', volume_tag_id, wantList=True) }}"
+ vars:
+ volume_tag_id: "{{ workload_tags | json_query('json[?name==`volume_tag`]') }}"
+
+- name: Repeat add workload tag (no change)
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ write_cache_enable: false
+ read_cache_enable: true
+ workload_name: volume_tag
+ metadata:
+ volume_tag_key: volume_tag_value
+ register: results
+- pause: seconds=15
+- name: Validate volume workload changes
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ not results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '322122547200' and item.segmentSize == 131072 and
+ item.cacheSettings.readCacheEnable and not item.cacheSettings.writeCacheEnable and
+ {'key': 'volumeTypeId', 'value': 'volume'} in item.metadata }}"
+ msg: "Failed to not modify volume metadata!"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: workload_tags
+- assert:
+ that: "{{ item.name == 'volume_tag' and
+ {'key': 'volume_tag_key', 'value': 'volume_tag_value'} in item.workloadAttributes }}"
+ msg: "Workload tag failed not to be changed"
+ loop: "{{ lookup('list', volume_tag_id, wantList=True) }}"
+ vars:
+ volume_tag_id: "{{ workload_tags | json_query('json[?name==`volume_tag`]') }}"
+
+- name: Workload tag (no change, just using workload_name)
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ write_cache_enable: false
+ read_cache_enable: true
+ workload_name: volume_tag
+ register: results
+- pause: seconds=15
+- name: Validate volume workload changes
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ not results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '322122547200' and item.segmentSize == 131072 and
+ item.cacheSettings.readCacheEnable and not item.cacheSettings.writeCacheEnable and
+ {'key': 'volumeTypeId', 'value': 'volume'} in item.metadata }}"
+ msg: "Failed to not modify volume metadata!"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: workload_tags
+- assert:
+ that: "{{ item.name == 'volume_tag' and
+ {'key': 'volume_tag_key', 'value': 'volume_tag_value'} in item.workloadAttributes }}"
+ msg: "Workload tag failed to not be modified!"
+ loop: "{{ lookup('list', volume_tag_id, wantList=True) }}"
+ vars:
+ volume_tag_id: "{{ workload_tags | json_query('json[?name==`volume_tag`]') }}"
+
+- name: Add workload tag (change, new attributes)
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ write_cache_enable: false
+ read_cache_enable: true
+ workload_name: volume_tag
+ metadata:
+ volume_tag_key2: volume_tag_value2
+ register: results
+- pause: seconds=15
+- name: Validate volume workload changes
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '322122547200' and item.segmentSize == 131072 and
+ item.cacheSettings.readCacheEnable and not item.cacheSettings.writeCacheEnable and
+ {'key': 'volumeTypeId', 'value': 'volume'} in item.metadata }}"
+ msg: "Failed to not modify volume metadata!"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: workload_tags
+- assert:
+ that: "{{ item.name == 'volume_tag' and
+ {'key': 'volume_tag_key2', 'value': 'volume_tag_value2'} in item.workloadAttributes }}"
+ msg: "Workload tag failed to be updated!"
+ loop: "{{ lookup('list', volume_tag_id, wantList=True) }}"
+ vars:
+ volume_tag_id: "{{ workload_tags | json_query('json[?name==`volume_tag`]') }}"
+
+- name: Remove workload tag from volume (change)
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ write_cache_enable: false
+ read_cache_enable: true
+ register: results
+- pause: seconds=15
+- name: Validate volume workload changes
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '322122547200' and item.segmentSize == 131072 and
+ item.cacheSettings.readCacheEnable and not item.cacheSettings.writeCacheEnable and
+ item.metadata == []}}"
+ msg: "Failed to not modify volume metadata!"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: workload_tags
+- assert:
+ that: "{{ item.name == 'volume_tag' and
+ {'key': 'volume_tag_key2', 'value': 'volume_tag_value2'} in item.workloadAttributes }}"
+ msg: "Workload tag failed to be updated!"
+ loop: "{{ lookup('list', volume_tag_id, wantList=True) }}"
+ vars:
+ volume_tag_id: "{{ workload_tags | json_query('json[?name==`volume_tag`]') }}"
+
+- name: Delete workload tag
+ uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: workload_tags
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/workloads/{{ item }}"
+ method: DELETE
+ status_code: 204
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ loop: "{{ lookup('list', volume_tag_id, wantList=True) }}"
+ vars:
+ volume_tag_id: "{{ workload_tags | json_query('json[?name==`volume_tag`].id') }}"
+
+- name: Delete raid 0 storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: storage_pool
+
+
+# *** Thin volume testing (May not work with simulator) ***
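+# Thin volumes require a disk pool; the storage pool below omits raid_level and relies on the module default (raidDiskPool).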
+- name: Create dynamic disk pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: storage_pool
+ criteria_min_usable_capacity: 2
+ criteria_size_unit: tb
+
+- name: Create thin volume
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume
+ storage_pool_name: storage_pool
+ size: 131072
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
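+# Thin volume byte math: virtual size 131072gb = 2**47 = 140737488355328, initial repository 32gb = 34359738368,
+# and repository quota 1024gb = 1099511627776.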
+- assert:
+ that: "{{ results.changed and item.name == 'thin_volume' and item.thinProvisioned and
+ item.capacity == '140737488355328' and item.initialProvisionedCapacity == '34359738368' and
+ item.provisionedCapacityQuota == '1099511627776' and item.expansionPolicy == 'automatic' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume`]') }}"
+
+- name: (Rerun) Create thin volume
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume
+ storage_pool_name: storage_pool
+ size: 131072
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ not results.changed and item.name == 'thin_volume' and item.thinProvisioned and
+ item.capacity == '140737488355328' and item.initialProvisionedCapacity == '34359738368' and
+ item.provisionedCapacityQuota == '1099511627776' and item.expansionPolicy == 'automatic' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume`]') }}"
+
+
+- name: Expand thin volume's virtual size
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'thin_volume' and item.thinProvisioned and
+ item.capacity == '281474976710656' and item.initialProvisionedCapacity == '34359738368' and
+ item.provisionedCapacityQuota == '1099511627776' and item.expansionPolicy == 'automatic' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume`]') }}"
+
+
+- name: Expand thin volume's maximum repository size
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 2048
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'thin_volume' and item.thinProvisioned and
+ item.capacity == '281474976710656' and item.initialProvisionedCapacity == '34359738368' and
+ item.provisionedCapacityQuota == '2199023255552' and item.expansionPolicy == 'automatic' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume`]') }}"
+
+- name: Create dynamic disk pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: storage_pool2
+ criteria_min_usable_capacity: 2
+ criteria_size_unit: tb
+- pause: seconds=15
+
+- name: Create second thin volume with manual expansion policy
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume2
+ storage_pool_name: storage_pool2
+ size_unit: gb
+ size: 131072
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 32
+ thin_volume_expansion_policy: manual
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'thin_volume2' and item.thinProvisioned and
+ item.capacity == '140737488355328' and item.initialProvisionedCapacity == '34359738368' and
+ item.currentProvisionedCapacity == '34359738368' and item.expansionPolicy == 'manual' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume2`]') }}"
+
+
+- name: Expand second thin volume's repository size
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume2
+ storage_pool_name: storage_pool2
+ size_unit: gb
+ size: 131072
+ thin_provision: true
+ thin_volume_repo_size: 288
+ thin_volume_max_repo_size: 288
+ thin_volume_expansion_policy: manual
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'thin_volume2' and item.thinProvisioned and
+ item.capacity == '140737488355328' and item.initialProvisionedCapacity == '34359738368' and
+ item.currentProvisionedCapacity == '309237645312' and item.expansionPolicy == 'manual' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume2`]') }}"
+
+- name: Modify second thin volume to use automatic expansion policy
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: thin_volume2
+ storage_pool_name: storage_pool2
+ size_unit: gb
+ size: 131072
+ thin_provision: true
+ thin_volume_repo_size: 288
+ thin_volume_max_repo_size: 288
+ thin_volume_expansion_policy: automatic
+ register: results
+- pause: seconds=15
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/thin-volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- assert:
+ that: "{{ results.changed and item.name == 'thin_volume2' and item.thinProvisioned and
+ item.capacity == '140737488355328' and item.initialProvisionedCapacity == '34359738368' and
+ item.currentProvisionedCapacity == '309237645312' and item.expansionPolicy == 'automatic' }}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`thin_volume2`]') }}"
+
+- name: Delete storage pools
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - storage_pool
+ - storage_pool2
+
+- name: Create raid 0 storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: storage_pool
+ criteria_min_usable_capacity: 5
+ criteria_size_unit: tb
+ erase_secured_drives: yes
+ raid_level: raid0
+
+# Thick volume expansion testing: wait and don't wait for operation to complete
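+# The /expand endpoint reports action == 'none' once an expansion has completed; the asserts below use it to
+# confirm the wait_for_initialization behavior.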
+- name: Create raid 6 storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: present
+ name: storage_pool3
+ criteria_min_usable_capacity: 5
+ criteria_size_unit: tb
+ erase_secured_drives: yes
+ raid_level: raid6
+
+- name: Delete volume in raid 6 storage pool
+ na_santricity_volume:
+ <<: *creds
+ state: absent
+ name: volume
+
+- name: Create volume in raid 6 storage pool for expansion testing
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool3
+ size: 1
+ size_unit: gb
+ register: results
+- pause: seconds=10
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
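+# 1gb -> 1073741824 bytes; the later expansion checks expect 10gb (10737418240) and 100gb (107374182400).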
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '1073741824' and item.segmentSize == 131072}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- name: Modify volume in raid 6 storage pool and wait for expansion testing
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool3
+ size: 10
+ size_unit: gb
+ wait_for_initialization: True
+ register: results
+- pause: seconds=10
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes/{{ volume[0]['id'] }}/expand"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: expansion_state
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '10737418240' and item.segmentSize == 131072 and
+ expansion_state['json']['action'] == 'none'}}"
+ msg: "Volume expansion test failed."
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- name: Modify volume in raid 6 storage pool and don't wait for expansion testing
+ na_santricity_volume:
+ <<: *creds
+ state: present
+ name: volume
+ storage_pool_name: storage_pool3
+ size: 100
+ size_unit: gb
+ wait_for_initialization: False
+ register: results
+- pause: seconds=10
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: current
+- uri:
+ url: "{{ credentials.api_url }}storage-systems/{{ credentials.ssid }}/volumes/{{ volume[0]['id'] }}/expand"
+ user: "{{ credentials.api_username }}"
+ password: "{{ credentials.api_password }}"
+ validate_certs: no
+ register: expansion_state
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+- assert:
+ that: "{{ results.changed and item.name == 'volume' and not item.thinProvisioned and
+ item.capacity == '107374182400' and item.segmentSize == 131072 and expansion_state['json']['action'] != 'none'}}"
+ msg: "Failed to create volume"
+ loop: "{{ lookup('list', volume, wantList=True) }}"
+ vars:
+ volume: "{{ current | json_query('json[?name==`volume`]') }}"
+
+- name: Delete raid 6 storage pool
+ na_santricity_storagepool:
+ <<: *creds
+ state: absent
+ name: "{{ item }}"
+ loop:
+ - storage_pool3
\ No newline at end of file
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts.py
new file mode 100644
index 000000000..3510e5107
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts.py
@@ -0,0 +1,194 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_alerts import NetAppESeriesAlerts
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class AlertsTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ 'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1',
+ 'state': 'disabled'
+ }
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_alerts.NetAppESeriesAlerts.request'
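+ # REQ_FUNC is the dotted path handed to mock.patch so each test can intercept the module's REST requests.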
+
+ def _set_args(self, **kwargs):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if kwargs is not None:
+ module_args.update(kwargs)
+ set_module_args(module_args)
+
+ def _validate_args(self, **kwargs):
+ self._set_args(**kwargs)
+ NetAppESeriesAlerts()
+
+ def test_validation_disable(self):
+ """Ensure a default configuration succeeds"""
+ self._validate_args()
+
+ def test_validation_enable(self):
+ """Ensure a typical, default configuration succeeds"""
+ self._validate_args(state='enabled', server='localhost', sender='x@y.z', recipients=['a@b.c'])
+
+ def test_validation_fail_required(self):
+ """Ensure we fail on missing configuration"""
+
+ # Missing recipients
+ with self.assertRaises(AnsibleFailJson):
+ self._validate_args(state='enabled', server='localhost', sender='x@y.z')
+ NetAppESeriesAlerts()
+
+ # Missing sender
+ with self.assertRaises(AnsibleFailJson):
+ self._validate_args(state='enabled', server='localhost', recipients=['a@b.c'])
+ NetAppESeriesAlerts()
+
+ # Missing server
+ with self.assertRaises(AnsibleFailJson):
+ self._validate_args(state='enabled', sender='x@y.z', recipients=['a@b.c'])
+
+ def test_validation_fail(self):
+ # Empty recipients
+ with self.assertRaises(AnsibleFailJson):
+ self._validate_args(state='enabled', server='localhost', sender='x@y.z', recipients=[])
+
+ # Bad sender
+ with self.assertRaises(AnsibleFailJson):
+ self._validate_args(state='enabled', server='localhost', sender='y.z', recipients=['a@b.c'])
+
+ def test_get_configuration(self):
+ """Validate retrieving the current configuration"""
+ self._set_args(state='enabled', server='localhost', sender='x@y.z', recipients=['a@b.c'])
+
+ expected = 'result'
+ alerts = NetAppESeriesAlerts()
+ alerts.is_proxy = lambda: False
+ alerts.is_embedded_available = lambda: False
+
+ # Expecting an update
+ with mock.patch(self.REQ_FUNC, return_value=(200, expected)) as req:
+ actual = alerts.get_configuration()
+ self.assertEquals(expected, actual)
+ self.assertEquals(req.call_count, 1)
+
+ def test_update_configuration(self):
+ """Validate updating the configuration"""
+ initial = dict(alertingEnabled=True,
+ emailServerAddress='localhost',
+ sendAdditionalContactInformation=True,
+ additionalContactInformation='None',
+ emailSenderAddress='x@y.z',
+ recipientEmailAddresses=['x@y.z']
+ )
+
+ args = dict(state='enabled', server=initial['emailServerAddress'], sender=initial['emailSenderAddress'],
+ contact=initial['additionalContactInformation'], recipients=initial['recipientEmailAddresses'])
+
+ self._set_args(**args)
+
+ alerts = NetAppESeriesAlerts()
+ alerts.is_proxy = lambda: False
+ alerts.is_embedded_available = lambda: False
+
+ # Ensure we trigger updates when each relevant field is changed
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)) as req:
+ with mock.patch.object(alerts, 'get_configuration', return_value=initial):
+ update = alerts.update_configuration()
+ self.assertFalse(update)
+
+ alerts.sender = 'a@b.c'
+ update = alerts.update_configuration()
+ self.assertTrue(update)
+ self._set_args(**args)
+
+ alerts.recipients = ['a@b.c']
+ update = alerts.update_configuration()
+ self.assertTrue(update)
+ self._set_args(**args)
+
+ alerts.contact = 'abc'
+ update = alerts.update_configuration()
+ self.assertTrue(update)
+ self._set_args(**args)
+
+ alerts.server = 'abc'
+ update = alerts.update_configuration()
+ self.assertTrue(update)
+
+ def test_send_test_email_check(self):
+ """Ensure we handle check_mode correctly"""
+ self._set_args(test=True)
+ alerts = NetAppESeriesAlerts()
+ alerts.check_mode = True
+ with mock.patch(self.REQ_FUNC) as req:
+ with mock.patch.object(alerts, 'update_configuration', return_value=True):
+ alerts.send_test_email()
+ self.assertFalse(req.called)
+
+ def test_send_test_email(self):
+ """Ensure we send a test email if test=True"""
+ self._set_args(test=True)
+ alerts = NetAppESeriesAlerts()
+ alerts.is_proxy = lambda: False
+ alerts.is_embedded_available = lambda: False
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, dict(response='emailSentOK'))) as req:
+ alerts.send_test_email()
+ self.assertTrue(req.called)
+
+ def test_send_test_email_fail(self):
+ """Ensure we fail if the test returned a failure status"""
+ self._set_args(test=True)
+ alerts = NetAppESeriesAlerts()
+ alerts.is_proxy = lambda: False
+ alerts.is_embedded_available = lambda: False
+
+ ret_msg = 'fail'
+ with self.assertRaisesRegexp(AnsibleFailJson, ret_msg):
+ with mock.patch(self.REQ_FUNC, return_value=(200, dict(response=ret_msg))) as req:
+ alerts.send_test_email()
+ self.assertTrue(req.called)
+
+ def test_send_test_email_fail_connection(self):
+ """Ensure we fail cleanly if we hit a connection failure"""
+ self._set_args(test=True)
+ alerts = NetAppESeriesAlerts()
+ alerts.is_proxy = lambda: False
+ alerts.is_embedded_available = lambda: False
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"failed to send"):
+ with mock.patch(self.REQ_FUNC, side_effect=Exception) as req:
+ alerts.send_test_email()
+ self.assertTrue(req.called)
+
+ def test_update(self):
+ # Ensure that when test is enabled and alerting is enabled, we run the test
+ self._set_args(state='enabled', server='localhost', sender='x@y.z', recipients=['a@b.c'], test=True)
+ alerts = NetAppESeriesAlerts()
+ with self.assertRaisesRegexp(AnsibleExitJson, r"enabled"):
+ with mock.patch.object(alerts, 'update_configuration', return_value=True):
+ with mock.patch.object(alerts, 'send_test_email') as test:
+ alerts.update()
+ self.assertTrue(test.called)
+
+ # Ensure we don't run a test when changed=False
+ with self.assertRaisesRegexp(AnsibleExitJson, r"enabled"):
+ with mock.patch.object(alerts, 'update_configuration', return_value=False):
+ with mock.patch.object(alerts, 'send_test_email') as test:
+ alerts.update()
+ self.assertFalse(test.called)
+
+ # Ensure that test is not called when we have alerting disabled
+ self._set_args(state='disabled')
+ alerts = NetAppESeriesAlerts()
+ with self.assertRaisesRegexp(AnsibleExitJson, r"disabled"):
+ with mock.patch.object(alerts, 'update_configuration', return_value=True):
+ with mock.patch.object(alerts, 'send_test_email') as test:
+ alerts.update()
+ self.assertFalse(test.called)
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts_syslog.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts_syslog.py
new file mode 100644
index 000000000..758c7c21c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_alerts_syslog.py
@@ -0,0 +1,151 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_alerts_syslog import NetAppESeriesAlertsSyslog
+from units.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class NetAppESeriesAlertSyslogTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ "api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ }
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_alerts_syslog.NetAppESeriesAlertsSyslog.request'
+ BASE_REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.request'
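+ # BASE_REQ_FUNC patches the module_utils request consumed during object construction (firmware version and
+ # proxy discovery responses), while REQ_FUNC intercepts the module's own request method in the tests below.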
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_valid_options_pass(self):
+ """Validate valid options."""
+ options_list = [{"servers": []},
+ {"servers": [{"address": "192.168.1.100"}]},
+ {"servers": [{"address": "192.168.1.100", "port": 1000}]},
+ {"servers": [{"address": "192.168.1.100"}, {"address": "192.168.1.200", "port": 1000}, {"address": "192.168.1.300", "port": 2000}]},
+ {"servers": [{"address": "192.168.1.101"}, {"address": "192.168.1.102"}, {"address": "192.168.1.103"},
+ {"address": "192.168.1.104"}, {"address": "192.168.1.105"}]}]
+
+ for options in options_list:
+ self._set_args(options)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+ for options in options_list:
+ self._set_args(options)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": True})]):
+ syslog = NetAppESeriesAlertsSyslog()
+
+ def test_invalid_options_fail(self):
+ """Validate exceptions are thrown when invalid options are provided."""
+ options_list = [{"servers": [{"address": "192.168.1.100"}, {"address": "192.168.1.200"}, {"address": "192.168.1.300"},
+ {"address": "192.168.1.101"}, {"address": "192.168.1.102"}, {"address": "192.168.1.103"}]}]
+
+ for options in options_list:
+ self._set_args(options)
+ with self.assertRaisesRegexp(AnsibleFailJson, "Maximum number of syslog servers is 5!"):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+
+ def test_change_required_pass(self):
+ """Validate is_change_required properly reports true."""
+ options_list = [{"servers": []},
+ {"servers": [{"address": "192.168.1.100"}]},
+ {"servers": [{"address": "192.168.1.100", "port": 1000}]},
+ {"servers": [{"address": "192.168.1.100"}, {"address": "192.168.1.200", "port": 1000}, {"address": "192.168.1.300", "port": 2000}]},
+ {"servers": [{"address": "192.168.1.101"}, {"address": "192.168.1.102"}, {"address": "192.168.1.103"},
+ {"address": "192.168.1.104"}, {"address": "192.168.1.105"}]}]
+ current_config_list = [{"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}]},
+ {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 1000}]},
+ {"syslogReceivers": [{"serverName": "192.168.1.101", "portNumber": 1000}]},
+ {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}]},
+ {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}]}]
+
+ for index in range(5):
+ self._set_args(options_list[index])
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+ syslog.get_current_configuration = lambda: current_config_list[index]
+ self.assertTrue(syslog.is_change_required())
+
+ def test_get_current_configuration_fail(self):
+ """Verify get_current_configuration throws expected exception."""
+ self._set_args({"servers": []})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve syslog configuration!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ syslog.get_current_configuration()
+
+ def test_no_change_required_pass(self):
+ """Validate is_change_required properly reports false."""
+ options_list = [{"servers": []},
+ {"servers": [{"address": "192.168.1.100"}]},
+ {"servers": [{"address": "192.168.1.101", "port": 1000}, {"address": "192.168.1.100", "port": 514}]}]
+ current_config_list = [{"syslogReceivers": []},
+ {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}]},
+ {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}, {"serverName": "192.168.1.101", "portNumber": 1000}]}]
+
+ for index in range(3):
+ self._set_args(options_list[index])
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+ syslog.get_current_configuration = lambda: current_config_list[index]
+ self.assertFalse(syslog.is_change_required())
+
+ def test_request_body_pass(self):
+ """Verify request body is properly formatted."""
+ options_list = [{"servers": []},
+ {"servers": [{"address": "192.168.1.100"}]},
+ {"servers": [{"address": "192.168.1.101", "port": 1000}, {"address": "192.168.1.100", "port": 514}]}]
+ expected_config_list = [{"syslogReceivers": [], "defaultFacility": 3, "defaultTag": "StorageArray"},
+ {"syslogReceivers": [{"serverName": "192.168.1.100", "portNumber": 514}], "defaultFacility": 3, "defaultTag": "StorageArray"},
+ {"syslogReceivers": [{"serverName": "192.168.1.101", "portNumber": 1000}, {"serverName": "192.168.1.100", "portNumber": 514}],
+ "defaultFacility": 3, "defaultTag": "StorageArray"}]
+
+ for index in range(3):
+ self._set_args(options_list[index])
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+ self.assertEqual(syslog.make_request_body(), expected_config_list[index])
+
+ def test_test_configuration_fail(self):
+ """Verify get_current_configuration throws expected exception."""
+ self._set_args({"servers": []})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to send test message!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ syslog.test_configuration()
+
+ def test_update_pass(self):
+ """Verify update method successfully completes."""
+ self._set_args({"test": True, "servers": [{"address": "192.168.1.100"}]})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+ syslog.is_change_required = lambda: True
+ syslog.make_request_body = lambda: {}
+ syslog.test_configuration = lambda: None
+
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ syslog.update()
+
+ def test_update_fail(self):
+ """Verify update method throws expected exceptions."""
+ self._set_args({"servers": []})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesAlertsSyslog()
+ syslog.is_change_required = lambda: True
+ syslog.make_request_body = lambda: {}
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to add syslog server!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ syslog.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_asup.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_asup.py
new file mode 100644
index 000000000..84c05d59e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_asup.py
@@ -0,0 +1,318 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+from units.compat import mock
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_asup import NetAppESeriesAsup
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class AsupTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ "api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1",
+ }
+
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_asup.NetAppESeriesAsup.request"
+ BASE_REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.request'
+ TIME_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_asup.time.time"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_valid_options_pass(self):
+ """Validate valid options."""
+ options_list = [
+ {"state": "disabled", "active": False},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday", "sunday"],
+ "method": "email", "email": {"server": "192.168.1.100", "sender": "noreply@netapp.com"}},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday", "sunday"],
+ "method": "https", "routing_type": "direct"},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday", "sunday"],
+ "method": "https", "routing_type": "proxy", "proxy": {"host": "192.168.1.100", "port": 1234}},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday", "sunday"],
+ "method": "https", "routing_type": "script", "proxy": {"script": "/path/to/proxy/script.sh"}},
+ {"state": "maintenance_enabled", "maintenance_duration": 24, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]},
+ {"state": "maintenance_disabled"}
+ ]
+
+ for options in options_list:
+ self._set_args(options)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
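+ # Repeat the instantiations as though the target reports runningAsProxy=True (proxy-managed case).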
+ for options in options_list:
+ self._set_args(options)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": True})]):
+ asup = NetAppESeriesAsup()
+
+ def test_invalid_options_fail(self):
+ """Verify invalid options throw expected exceptions."""
+ options_list = [
+ {"state": "enabled", "active": False, "start": 24, "end": 23, "days": ["saturday", "sunday"],
+ "method": "email", "email": {"server": "192.168.1.100", "sender": "noreply@netapp.com"}},
+ {"state": "enabled", "active": False, "start": -1, "end": 23, "days": ["saturday", "sunday"],
+ "method": "email", "email": {"server": "192.168.1.100", "sender": "noreply@netapp.com"}},
+ {"state": "enabled", "active": False, "start": 20, "end": 25, "days": ["saturday", "sunday"],
+ "method": "email", "email": {"server": "192.168.1.100", "sender": "noreply@netapp.com"}},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["not_a_day", "sunday"],
+ "method": "https", "routing_type": "direct"},
+ {"state": "maintenance_enabled", "maintenance_duration": 0, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]},
+ {"state": "maintenance_enabled", "maintenance_duration": 73, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]},
+ ]
+
+ for options in options_list:
+ self._set_args(options)
+ with self.assertRaises(AnsibleFailJson):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+
+ def test_get_configuration_fail(self):
+ """Verify get_configuration method throws expected exceptions."""
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve ASUP configuration!"):
+ asup.get_configuration()
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"asupCapable": False, "onDemandCapable": True})):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve ASUP configuration!"):
+ asup.get_configuration()
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"asupCapable": True, "onDemandCapable": False})):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve ASUP configuration!"):
+ asup.get_configuration()
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"asupCapable": False, "onDemandCapable": False})):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve ASUP configuration!"):
+ asup.get_configuration()
+
+ def test_in_maintenance_mode_pass(self):
+ """Verify whether asup is in maintenance mode successful."""
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
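+ # A stored stop time in the future means the maintenance window is still active; a past stop time does not.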
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"key": "ansible_asup_maintenance_stop_time", "value": str(time.time() + 10000)}])):
+ self.assertTrue(asup.in_maintenance_mode())
+
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"key": "ansible_asup_maintenance_email_list", "value": "janey@netapp.com,joe@netapp.com"},
+ {"key": "ansible_asup_maintenance_stop_time", "value": str(time.time() - 1)}])):
+ self.assertFalse(asup.in_maintenance_mode())
+
+ def test_in_maintenance_mode_fail(self):
+ """Verify that in_maintenance mode throws expected exceptions."""
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve maintenance windows information!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ asup.in_maintenance_mode()
+
+ def test_update_configuration_pass(self):
+ """Verify that update_configuration completes successfully."""
+ asup_config = [{"asupCapable": True,
+ "onDemandCapable": True,
+ "asupEnabled": True,
+ "onDemandEnabled": True,
+ "remoteDiagsEnabled": True,
+ "delivery": {"method": "smtp",
+ "routingType": "none",
+ "proxyHost": None,
+ "proxyPort": 0,
+ "proxyUserName": None,
+ "proxyPassword": None,
+ "proxyScript": None,
+ "mailRelayServer": "server@example.com",
+ "mailSenderAddress": "noreply@example.com"},
+ "destinationAddress": "autosupport@netapp.com",
+ "schedule": {"dailyMinTime": 0,
+ "dailyMaxTime": 1439,
+ "weeklyMinTime": 0,
+ "weeklyMaxTime": 1439,
+ "daysOfWeek": ["sunday", "monday", "tuesday"]}},
+ {"asupCapable": True,
+ "onDemandCapable": True,
+ "asupEnabled": True,
+ "onDemandEnabled": False,
+ "remoteDiagsEnabled": False,
+ "delivery": {
+ "method": "https",
+ "routingType": "proxyServer",
+ "proxyHost": "192.168.1.100",
+ "proxyPort": 1234,
+ "proxyUserName": None,
+ "proxyPassword": None,
+ "proxyScript": None,
+ "mailRelayServer": None,
+ "mailSenderAddress": None
+ },
+ "destinationAddress": "https://support.netapp.com/put/AsupPut/",
+ "schedule": {
+ "dailyMinTime": 1200,
+ "dailyMaxTime": 1439,
+ "weeklyMinTime": 0,
+ "weeklyMaxTime": 1439,
+ "daysOfWeek": ["sunday", "saturday"]}},
+ {"asupCapable": True,
+ "onDemandCapable": True,
+ "asupEnabled": True,
+ "onDemandEnabled": False,
+ "remoteDiagsEnabled": False,
+ "delivery": {
+ "method": "https",
+ "routingType": "proxyScript",
+ "proxyHost": None,
+ "proxyPort": 0,
+ "proxyUserName": None,
+ "proxyPassword": None,
+ "proxyScript": "/home/user/path/to/script.sh",
+ "mailRelayServer": None,
+ "mailSenderAddress": None
+ },
+ "destinationAddress": "https://support.netapp.com/put/AsupPut/",
+ "schedule": {
+ "dailyMinTime": 0,
+ "dailyMaxTime": 420,
+ "weeklyMinTime": 0,
+ "weeklyMaxTime": 1439,
+ "daysOfWeek": ["monday", "tuesday", "wednesday", "thursday", "friday"]}}]
+ options_list = [{"state": "disabled", "active": False},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday"],
+ "method": "email", "email": {"server": "192.168.1.100", "sender": "noreply@netapp.com"}},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["sunday"],
+ "method": "https", "routing_type": "direct"},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday", "sunday"],
+ "method": "https", "routing_type": "proxy", "proxy": {"host": "192.168.1.100", "port": 1234}},
+ {"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday", "sunday"],
+ "method": "https", "routing_type": "script", "proxy": {"script": "/path/to/proxy/script.sh"}},
+ {"state": "maintenance_enabled", "maintenance_duration": 24, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]},
+ {"state": "maintenance_disabled"}]
+
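+ # The stored configurations above are cycled with index % 3 so each option set is compared
+ # against a current configuration that differs from it.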
+ for index, options in enumerate(options_list):
+ self._set_args(options)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: asup_config[index % 3]
+ asup.in_maintenance_mode = lambda: False
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ asup.update_configuration()
+
+ def test_update_configuration_fail(self):
+ """Verify that update_configuration throws expected exceptions."""
+ asup_config = {"asupCapable": True,
+ "onDemandCapable": True,
+ "asupEnabled": True,
+ "onDemandEnabled": True,
+ "remoteDiagsEnabled": True,
+ "delivery": {"method": "smtp",
+ "routingType": "none",
+ "proxyHost": None,
+ "proxyPort": 0,
+ "proxyUserName": None,
+ "proxyPassword": None,
+ "proxyScript": None,
+ "mailRelayServer": "server@example.com",
+ "mailSenderAddress": "noreply@example.com"},
+ "destinationAddress": "autosupport@netapp.com",
+ "schedule": {"dailyMinTime": 0,
+ "dailyMaxTime": 1439,
+ "weeklyMinTime": 0,
+ "weeklyMaxTime": 1439,
+ "daysOfWeek": ["sunday", "monday", "tuesday"]}}
+
+ # Exceptions for state=="enabled" or state=="disabled"
+ self._set_args({"state": "enabled", "active": False, "start": 20, "end": 24, "days": ["saturday"],
+ "method": "email", "email": {"server": "192.168.1.100", "sender": "noreply@netapp.com"}})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: asup_config
+ asup.in_maintenance_mode = lambda: False
+ asup.validate = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to validate ASUP configuration!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ asup.update_configuration()
+ self._set_args({"state": "disabled", "active": False})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: asup_config
+ asup.in_maintenance_mode = lambda: False
+ asup.validate = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to change ASUP configuration!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ asup.update_configuration()
+
+ # Exceptions for state=="maintenance enabled"
+ self._set_args({"state": "maintenance_enabled", "maintenance_duration": 24, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": False}
+ asup.in_maintenance_mode = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "AutoSupport must be enabled before enabling or disabling maintenance mode."):
+ asup.update_configuration()
+ self._set_args({"state": "maintenance_enabled", "maintenance_duration": 24, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": True}
+ asup.in_maintenance_mode = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to enabled ASUP maintenance window."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ asup.update_configuration()
+ self._set_args({"state": "maintenance_enabled", "maintenance_duration": 24, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": True}
+ asup.in_maintenance_mode = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to store maintenance information."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None), Exception()]):
+ asup.update_configuration()
+ self._set_args({"state": "maintenance_enabled", "maintenance_duration": 24, "maintenance_emails": ["janey@netapp.com", "joe@netapp.com"]})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": True}
+ asup.in_maintenance_mode = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to store maintenance information."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None), (200, None), Exception()]):
+ asup.update_configuration()
+
+ # Exceptions for state=="maintenance disabled"
+ self._set_args({"state": "maintenance_disabled"})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": True}
+ asup.in_maintenance_mode = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to disable ASUP maintenance window."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ asup.update_configuration()
+ self._set_args({"state": "maintenance_disabled"})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": True}
+ asup.in_maintenance_mode = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to store maintenance information."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None), Exception()]):
+ asup.update_configuration()
+ self._set_args({"state": "maintenance_disabled"})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ asup = NetAppESeriesAsup()
+ asup.get_configuration = lambda: {"asupEnabled": True}
+ asup.in_maintenance_mode = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to store maintenance information."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None), (200, None), Exception()]):
+ asup.update_configuration()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auditlog.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auditlog.py
new file mode 100644
index 000000000..1cb57068a
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auditlog.py
@@ -0,0 +1,205 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_auditlog import NetAppESeriesAuditLog
+from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class NetAppESeriesAuditLogTests(ModuleTestCase):
+ REQUIRED_PARAMS = {'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1'}
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_auditlog.NetAppESeriesAuditLog.request'
+ BASE_REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.request'
+ MAX_RECORDS_MAXIMUM = 50000
+ MAX_RECORDS_MINIMUM = 100
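+ # These bounds match the limits asserted in the module's failure message (100 to 50000 records).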
+
+ def _set_args(self, **kwargs):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if kwargs is not None:
+ module_args.update(kwargs)
+ set_module_args(module_args)
+
+ def test_max_records_argument_pass(self):
+ """Verify NetAppESeriesAuditLog argument's max_records and threshold upper and lower boundaries."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+ max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
+
+ for max_records in max_records_set:
+ initial["max_records"] = max_records
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+ self.assertTrue(audit_log.max_records == max_records)
+
+ def test_max_records_argument_fail(self):
+ """Verify NetAppESeriesAuditLog arument's max_records and threshold upper and lower boundaries."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+ max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
+
+ for max_records in max_records_set:
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
+ initial["max_records"] = max_records
+ self._set_args(**initial)
+ NetAppESeriesAuditLog()
+
+ def test_threshold_argument_pass(self):
+ """Verify NetAppESeriesAuditLog argument's max_records and threshold upper and lower boundaries."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+ threshold_set = (60, 75, 90)
+
+ for threshold in threshold_set:
+ initial["threshold"] = threshold
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+ self.assertTrue(audit_log.threshold == threshold)
+
+ def test_threshold_argument_fail(self):
+ """Verify NetAppESeriesAuditLog arument's max_records and threshold upper and lower boundaries."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+ threshold_set = (59, 91)
+
+ for threshold in threshold_set:
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
+ initial["threshold"] = threshold
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ NetAppESeriesAuditLog()
+
+ def test_get_configuration_pass(self):
+ """Validate get configuration does not throw exception when normal request is returned."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+ expected = {"auditLogMaxRecords": 1000,
+ "auditLogLevel": "writeOnly",
+ "auditLogFullPolicy": "overWrite",
+ "auditLogWarningThresholdPct": 90}
+
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
+ body = audit_log.get_configuration()
+ self.assertTrue(body == expected)
+
+ def test_get_configuration_fail(self):
+ """Verify AnsibleJsonFail exception is thrown."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ audit_log.get_configuration()
+
+ def test_build_configuration_pass(self):
+ """Validate configuration changes will force an update."""
+ response = {"auditLogMaxRecords": 1000,
+ "auditLogLevel": "writeOnly",
+ "auditLogFullPolicy": "overWrite",
+ "auditLogWarningThresholdPct": 90}
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+ changes = [{"max_records": 50000},
+ {"log_level": "all"},
+ {"full_policy": "preventSystemAccess"},
+ {"threshold": 75}]
+
+ for change in changes:
+ initial_with_changes = initial.copy()
+ initial_with_changes.update(change)
+ self._set_args(**initial_with_changes)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, response)):
+ update = audit_log.build_configuration()
+ self.assertTrue(update)
+
+ def test_delete_log_messages_fail(self):
+ """Verify AnsibleJsonFail exception is thrown."""
+ initial = {"max_records": 1000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90}
+
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ audit_log.delete_log_messages()
+
+ def test_update_configuration_delete_pass(self):
+ """Verify 422 and force successfully returns True."""
+ body = {"auditLogMaxRecords": 1000,
+ "auditLogLevel": "writeOnly",
+ "auditLogFullPolicy": "overWrite",
+ "auditLogWarningThresholdPct": 90}
+ initial = {"max_records": 2000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90,
+ "force": True}
+
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
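+ # Side effects: the current configuration, a 422 full-condition rejection, then the forced log
+ # deletion and retried update both succeed because force=True.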
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
+ (422, {u"invalidFieldsIfKnown": None,
+ u"errorMessage": u"Configuration change...",
+ u"localizedMessage": u"Configuration change...",
+ u"retcode": u"auditLogImmediateFullCondition",
+ u"codeType": u"devicemgrerror"}),
+ (200, None),
+ (200, None)]):
+ self.assertTrue(audit_log.update_configuration())
+
+ def test_update_configuration_delete_skip_fail(self):
+ """Verify 422 and no force results in AnsibleJsonFail exception."""
+ body = {"auditLogMaxRecords": 1000,
+ "auditLogLevel": "writeOnly",
+ "auditLogFullPolicy": "overWrite",
+ "auditLogWarningThresholdPct": 90}
+ initial = {"max_records": 2000,
+ "log_level": "writeOnly",
+ "full_policy": "overWrite",
+ "threshold": 90,
+ "force": False}
+
+ self._set_args(**initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ audit_log = NetAppESeriesAuditLog()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
+ (200, None), (200, None)]):
+ audit_log.update_configuration()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auth.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auth.py
new file mode 100644
index 000000000..305d6028c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_auth.py
@@ -0,0 +1,488 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_auth import NetAppESeriesAuth
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class AuthTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "admin", "api_password": "password", "api_url": "http://localhost", "ssid": "1"}
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_auth.NetAppESeriesAuth.request"
+ SLEEP_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_auth.sleep"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_minimum_password_length_change_required_pass(self):
+ """Verify minimum_password_length_change_required returns expected values."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertFalse(auth.minimum_password_length_change_required())
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 7})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertTrue(auth.minimum_password_length_change_required())
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertFalse(auth.minimum_password_length_change_required())
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertFalse(auth.minimum_password_length_change_required())
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 7})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertTrue(auth.minimum_password_length_change_required())
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertFalse(auth.minimum_password_length_change_required())
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass", "minimum_password_length": 7})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ self.assertTrue(auth.minimum_password_length_change_required())
+
+ def test_minimum_password_length_change_required_fail(self):
+ """Verify minimum_password_length_change_required throws expected exceptions."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 10})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Password does not meet the length requirement"):
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": False, "minimumPasswordLength": 8})):
+ auth.minimum_password_length_change_required()
+
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Password does not meet the length requirement"):
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"adminPasswordSet": True, "minimumPasswordLength": 10})):
+ auth.minimum_password_length_change_required()
+
+ def test_update_minimum_password_length_pass(self):
+ """Verify update_minimum_password_length returns expected values."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.update_minimum_password_length()
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = False
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.update_minimum_password_length()
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = False
+ with mock.patch(self.REQ_FUNC, side_effect=[Exception(), (200, None)]):
+ auth.update_minimum_password_length()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.update_minimum_password_length()
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = False
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.update_minimum_password_length()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.update_minimum_password_length()
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = False
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.update_minimum_password_length()
+
+ def test_update_minimum_password_length_fail(self):
+ """Verify update_minimum_password_length throws expected exceptions."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set minimum password length."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.update_minimum_password_length()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set minimum password length."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.update_minimum_password_length()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set minimum password length."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.update_minimum_password_length()
+
+ def test_logout_system_pass(self):
+ """Verify logout_system returns expected values."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, return_value=(204, None)):
+ auth.logout_system()
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, return_value=(204, None)):
+ auth.logout_system()
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(204, None)):
+ auth.logout_system()
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass", "minimum_password_length": 8})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(204, None)):
+ auth.logout_system()
+
+ def test_password_change_required_pass(self):
+ """Verify password_change_required returns expected values."""
+ self._set_args({"ssid": "Proxy", "user": "admin"})
+ auth = NetAppESeriesAuth()
+ self.assertFalse(auth.password_change_required())
+
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": False})]):
+ self.assertTrue(auth.password_change_required())
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": False})]):
+ self.assertTrue(auth.password_change_required())
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": False})]):
+ self.assertTrue(auth.password_change_required())
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": False})]):
+ self.assertTrue(auth.password_change_required())
+
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.logout_system = lambda: None
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (200, None)]):
+ self.assertFalse(auth.password_change_required())
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (401, None)]):
+ self.assertTrue(auth.password_change_required())
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ auth.logout_system = lambda: None
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (200, None)]):
+ self.assertFalse(auth.password_change_required())
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (401, None)]):
+ self.assertTrue(auth.password_change_required())
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.logout_system = lambda: None
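+ # Here the outcome depends on the isValidPassword flag in the mocked response rather than a login status code.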
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (200, {"isValidPassword": True})]):
+ self.assertFalse(auth.password_change_required())
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (200, {"isValidPassword": False})]):
+ self.assertTrue(auth.password_change_required())
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ auth.logout_system = lambda: None
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (200, None)]):
+ self.assertFalse(auth.password_change_required())
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (401, None)]):
+ self.assertTrue(auth.password_change_required())
+
+ def test_password_change_required_fail(self):
+ """Verify password_change_required throws expected exceptions."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.logout_system = lambda: None
+ with self.assertRaisesRegexp(AnsibleFailJson, "SAML enabled! SAML disables default role based login."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (422, None)]):
+ auth.password_change_required()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.logout_system = lambda: None
+ auth.is_web_services_version_met = lambda x: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "For platforms before E2800 use SANtricity Web Services Proxy 4.1 or later!"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (404, None)]):
+ self.assertFalse(auth.password_change_required())
+ auth.is_web_services_version_met = lambda x: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to validate stored password!"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (404, None)]):
+ self.assertFalse(auth.password_change_required())
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to validate stored password!"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True}), (422, None)]):
+ self.assertFalse(auth.password_change_required())
+
+ self._set_args({"ssid": "10", "user": "monitor", "password": "monitorpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.logout_system = lambda: None
+ auth.is_web_services_version_met = lambda x: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Role based login not available! Only storage system password can be set for storage systems prior to"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, {"minimumPasswordLength": 8, "adminPasswordSet": True})]):
+ self.assertFalse(auth.password_change_required())
+
+ def test_set_array_admin_password_pass(self):
+ """Verify set_array_admin_password results."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None)]):
+ auth.set_array_admin_password()
+ with mock.patch(self.REQ_FUNC, side_effect=[Exception(), (200, None)]):
+ auth.set_array_admin_password()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None)]):
+ auth.set_array_admin_password()
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, None)]):
+ auth.set_array_admin_password()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.set_array_admin_password()
+
+ def test_set_array_admin_password_fail(self):
+ """Verify set_array_admin_password throws expected exceptions."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set proxy's admin password."):
+ with mock.patch(self.REQ_FUNC, side_effect=[Exception(), Exception()]):
+ auth.set_array_admin_password()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set storage system's admin password."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.set_array_admin_password()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set embedded storage system's admin password."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.set_array_admin_password()
+
+ def test_set_array_password_pass(self):
+ """Verify set_array_password results."""
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.set_array_password()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.set_array_password()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = True
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ auth.set_array_password()
+
+ def test_set_array_password_fail(self):
+ """Verify set_array_password throws expected exceptions."""
+ self._set_args({"ssid": "Proxy", "user": "monitor", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Admin password not set! Set admin password before changing non-admin user passwords."):
+ auth.set_array_password()
+
+ self._set_args({"ssid": "Proxy", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: False
+ auth.is_admin_password_set = True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set proxy password."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.set_array_password()
+
+ self._set_args({"ssid": "10", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: True
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set embedded user password."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.set_array_password()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_proxy = lambda: False
+ auth.is_embedded_available = lambda: True
+ auth.is_admin_password_set = True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to set embedded user password."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ auth.set_array_password()
+
+ def test_apply_pass(self):
+ """Verify apply results."""
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_admin_password_set = True
+ auth.password_change_required = lambda: True
+ auth.minimum_password_length_change_required = lambda: True
+ auth.update_minimum_password_length = lambda: None
+ auth.set_array_admin_password = lambda: None
+ auth.set_array_password = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "'admin' password and required password length has been changed."):
+ auth.apply()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_admin_password_set = False
+ auth.password_change_required = lambda: True
+ auth.minimum_password_length_change_required = lambda: True
+ auth.update_minimum_password_length = lambda: None
+ auth.set_array_admin_password = lambda: None
+ auth.set_array_password = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "'admin' password and required password length has been changed."):
+ auth.apply()
+
+ self._set_args({"ssid": "1", "user": "monitor", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_admin_password_set = True
+ auth.password_change_required = lambda: True
+ auth.minimum_password_length_change_required = lambda: True
+ auth.update_minimum_password_length = lambda: None
+ auth.set_array_admin_password = lambda: None
+ auth.set_array_password = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "'monitor' password and required password length has been changed."):
+ auth.apply()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_admin_password_set = True
+ auth.password_change_required = lambda: True
+ auth.minimum_password_length_change_required = lambda: False
+ auth.update_minimum_password_length = lambda: None
+ auth.set_array_admin_password = lambda: None
+ auth.set_array_password = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "'admin' password has been changed."):
+ auth.apply()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_admin_password_set = True
+ auth.password_change_required = lambda: False
+ auth.minimum_password_length_change_required = lambda: True
+ auth.update_minimum_password_length = lambda: None
+ auth.set_array_admin_password = lambda: None
+ auth.set_array_password = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "Required password length has been changed."):
+ auth.apply()
+
+ self._set_args({"ssid": "1", "user": "admin", "password": "adminpass"})
+ auth = NetAppESeriesAuth()
+ auth.is_admin_password_set = True
+ auth.password_change_required = lambda: False
+ auth.minimum_password_length_change_required = lambda: False
+ auth.update_minimum_password_length = lambda: None
+ auth.set_array_admin_password = lambda: None
+ auth.set_array_password = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes have been made."):
+ auth.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_client_certificate.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_client_certificate.py
new file mode 100644
index 000000000..9541aeb8a
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_client_certificate.py
@@ -0,0 +1,373 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import datetime
+import os
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_client_certificate import NetAppESeriesClientCertificate
+from units.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class NetAppESeriesClientCertificateTest(ModuleTestCase):
+
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "https://localhost:8443/devmgr/v2",
+ "ssid": "1", "validate_certs": "no"}
+
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_client_certificate.NetAppESeriesClientCertificate.request"
+ LOAD_PEM_X509_CERTIFICATE = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_client_certificate.x509.load_pem_x509_certificate"
+ LOAD_DER_X509_CERTIFICATE = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_client_certificate.x509.load_der_x509_certificate"
+ BASE_REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.request"
+
+ CERTIFICATE_PATH = "certificate.crt"
+ CERTIFICATE_CONTENT = """Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 1 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=AU, ST=Florida, L=Palm City, O=Internet Widgits Pty Ltd
+ Validity
+ Not Before: Apr 1 19:30:07 2019 GMT
+ Not After : Mar 29 19:30:07 2029 GMT
+ Subject: C=AU, ST=Florida, O=Internet Widgits Pty Ltd, CN=test.example.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:ad:64:b5:4c:40:bb:0f:03:e8:2d:a3:76:af:14:
+ 49:b8:06:4a:f9:48:9b:ad:f2:69:55:42:b0:49:de:
+ cd:10:c3:37:71:1a:f8:e1:5e:88:61:b3:c3:0f:7a:
+ 3b:3e:eb:47:d3:7b:02:f9:40:6d:11:e9:c6:d0:05:
+ 3c:ab:d2:51:97:a3:c9:5d:e4:31:89:85:28:dd:96:
+ 75:c7:18:87:0e:a4:26:cb:bc:6d:2f:47:74:89:10:
+ a0:40:5c:39:4e:c2:52:bc:72:25:6c:30:48:dc:50:
+ 4e:c7:10:68:7f:96:ef:14:78:05:b3:53:5a:91:2a:
+ 8f:b0:5d:75:f0:85:b7:34:6f:78:43:44:a6:3c:4d:
+ 87:56:d0:fb:cf:53:de:50:f8:a7:70:89:68:52:83:
+ 87:32:70:da:cc:3f:d5:ae:f8:b4:8f:d9:de:40:b7:
+ 9a:15:c3:83:4b:62:73:d3:a9:e6:fe:2e:4a:33:7f:
+ 13:76:10:d5:d4:04:18:44:9c:b7:a8:17:3f:fe:4b:
+ 5d:d4:92:5e:9f:95:64:77:ef:1c:01:09:6a:a3:29:
+ 33:08:10:fa:5b:1c:ab:45:16:9d:ee:93:0b:90:d4:
+ ea:cf:0e:13:c8:73:d2:29:00:fa:c1:10:ed:20:66:
+ 4f:f5:a5:cf:8d:4e:2a:8e:4a:f2:8e:59:f1:a5:b6:
+ f5:87
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 08:21:10:B9:3E:A5:AF:63:02:88:F3:9D:77:74:FC:BB:AE:A0:BE:6F
+ X509v3 Authority Key Identifier:
+ keyid:B8:CC:D9:8C:03:C6:06:C3:C4:22:DD:04:64:70:79:0C:93:3F:5C:E8
+
+ Signature Algorithm: sha256WithRSAEncryption
+ 5b:9f:d8:f5:74:e0:66:56:99:62:d8:6f:c0:15:d9:fc:4f:8b:
+ 3d:ab:7a:a5:e0:55:49:62:fc:1f:d3:d1:71:4a:55:e9:a2:03:
+ 7b:57:8f:f2:e4:5b:9c:17:9e:e9:fe:4e:20:a7:48:87:e9:e8:
+ 80:e9:89:3c:4a:94:a2:68:6d:6d:b0:53:e3:9f:a5:dc:b9:cb:
+ 21:c3:b0:9f:1b:e1:32:8b:e3:cb:df:ba:32:bb:f4:fd:ef:83:
+ 9e:64:be:c4:37:4e:c2:90:65:60:3e:19:17:57:7f:59:9c:3d:
+ 8a:4b:4d:c6:42:ad:c4:98:d3:e1:88:74:3d:67:8b:6e:fd:85:
+ 1a:d0:ba:52:bc:24:bd:9e:74:82:d6:5f:8f:c7:2d:d8:04:b9:
+ fa:bd:e7:ef:5b:cf:d4:28:bf:c0:9a:6b:0c:7b:b7:3a:95:91:
+ 1c:f3:ad:5b:ce:48:cf:fa:c1:6e:82:f2:df:bd:ba:51:8e:00:
+ fb:86:b1:a6:a9:6a:5e:e4:e4:17:a2:35:b5:3c:fa:b1:4f:8d:
+ b7:24:53:0f:63:ac:16:f5:91:a0:15:e9:59:cd:59:55:28:a3:
+ d9:c0:70:74:30:5b:01:2a:e4:25:44:36:dd:74:f1:4a:3c:c3:
+ ad:52:51:c1:c7:79:7a:d7:21:23:a0:b6:55:c4:0d:27:40:10:
+ 4f:9c:db:04:f8:37:5a:4b:a1:9b:f2:78:b3:63:1a:c5:e3:6a:
+ a8:6d:c9:d5:73:41:91:c0:49:2c:72:32:43:73:f2:15:3e:c1:
+ 31:5d:91:b9:04:c1:78:a8:4e:cf:34:90:ee:05:f9:e5:ee:21:
+ 4c:1b:ae:55:fd:d8:c9:39:91:4c:5e:61:d9:72:10:a4:24:6a:
+ 20:c6:ad:44:0c:81:7a:ca:d5:fc:1c:6a:bf:52:9d:87:13:47:
+ dd:79:9e:6f:6e:03:be:06:7a:87:c9:5f:2d:f8:9f:c6:44:e6:
+ 05:c0:cd:28:17:2c:09:28:50:2b:12:39:ff:86:85:71:6b:f0:
+ cd:0f:4d:54:89:de:88:ee:fb:e8:e3:ba:45:97:9e:67:d6:ae:
+ 38:54:86:79:ca:fe:99:b4:20:25:d2:30:aa:3a:62:95:0f:dd:
+ 42:00:18:88:c7:1f:42:07:1d:dd:9c:42:c4:2f:56:c5:50:b1:
+ cd:6d:b9:36:df:9f:5d:f5:77:b3:cd:e4:b8:62:ed:2b:50:d0:
+ 0b:a2:31:0c:ae:20:8c:b4:0a:83:1f:20:3f:6c:d6:c7:bc:b6:
+ 84:ae:60:6e:69:2b:cb:01:22:55:a4:e5:3e:62:34:bd:20:f8:
+ 12:13:6f:25:8d:49:88:74:ba:61:51:bc:bc:8a:c6:fb:02:31:
+ ce:5b:85:df:55:d0:55:9b
+-----BEGIN CERTIFICATE-----
+MIIEqTCCApGgAwIBAgIBATANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTEQ
+MA4GA1UECAwHRmxvcmlkYTESMBAGA1UEBwwJUGFsbSBDaXR5MSEwHwYDVQQKDBhJ
+bnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMTkwNDAxMTkzMDA3WhcNMjkwMzI5
+MTkzMDA3WjBdMQswCQYDVQQGEwJBVTEQMA4GA1UECAwHRmxvcmlkYTEhMB8GA1UE
+CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRkwFwYDVQQDDBB0ZXN0LmV4YW1w
+bGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArWS1TEC7DwPo
+LaN2rxRJuAZK+UibrfJpVUKwSd7NEMM3cRr44V6IYbPDD3o7PutH03sC+UBtEenG
+0AU8q9JRl6PJXeQxiYUo3ZZ1xxiHDqQmy7xtL0d0iRCgQFw5TsJSvHIlbDBI3FBO
+xxBof5bvFHgFs1NakSqPsF118IW3NG94Q0SmPE2HVtD7z1PeUPincIloUoOHMnDa
+zD/Vrvi0j9neQLeaFcODS2Jz06nm/i5KM38TdhDV1AQYRJy3qBc//ktd1JJen5Vk
+d+8cAQlqoykzCBD6WxyrRRad7pMLkNTqzw4TyHPSKQD6wRDtIGZP9aXPjU4qjkry
+jlnxpbb1hwIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVu
+U1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUCCEQuT6lr2MCiPOd
+d3T8u66gvm8wHwYDVR0jBBgwFoAUuMzZjAPGBsPEIt0EZHB5DJM/XOgwDQYJKoZI
+hvcNAQELBQADggIBAFuf2PV04GZWmWLYb8AV2fxPiz2reqXgVUli/B/T0XFKVemi
+A3tXj/LkW5wXnun+TiCnSIfp6IDpiTxKlKJobW2wU+Ofpdy5yyHDsJ8b4TKL48vf
+ujK79P3vg55kvsQ3TsKQZWA+GRdXf1mcPYpLTcZCrcSY0+GIdD1ni279hRrQulK8
+JL2edILWX4/HLdgEufq95+9bz9Qov8Caawx7tzqVkRzzrVvOSM/6wW6C8t+9ulGO
+APuGsaapal7k5BeiNbU8+rFPjbckUw9jrBb1kaAV6VnNWVUoo9nAcHQwWwEq5CVE
+Nt108Uo8w61SUcHHeXrXISOgtlXEDSdAEE+c2wT4N1pLoZvyeLNjGsXjaqhtydVz
+QZHASSxyMkNz8hU+wTFdkbkEwXioTs80kO4F+eXuIUwbrlX92Mk5kUxeYdlyEKQk
+aiDGrUQMgXrK1fwcar9SnYcTR915nm9uA74GeofJXy34n8ZE5gXAzSgXLAkoUCsS
+Of+GhXFr8M0PTVSJ3oju++jjukWXnmfWrjhUhnnK/pm0ICXSMKo6YpUP3UIAGIjH
+H0IHHd2cQsQvVsVQsc1tuTbfn131d7PN5Lhi7StQ0AuiMQyuIIy0CoMfID9s1se8
+toSuYG5pK8sBIlWk5T5iNL0g+BITbyWNSYh0umFRvLyKxvsCMc5bhd9V0FWb
+-----END CERTIFICATE-----"""
+ #
+ # {'expire_date': datetime.datetime(2029, 3, 29, 19, 30, 7),
+ # 'issuer_dn': [u'AU', u'Florida', u'Palm City', u'Internet Widgits Pty Ltd'],
+ # 'start_date': datetime.datetime(2019, 4, 1, 19, 30, 7),
+ # 'subject_dn': [u'AU', u'Florida', u'Internet Widgits Pty Ltd', u'test.example.com']})
+ #
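+    # Expected SHA-256 fingerprint of CERTIFICATE_CONTENT above; test_certificate_fingerprint_pass
+    # asserts that certificate_fingerprint() returns this exact value for the written certificate file.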
+ CERTIFICATE_FINGERPRINT = b"4cb68a8039a54b2f5fbe4c55dabb92464a0149a9fce64eb779fd3211c482e44e"
+ GET_CERTIFICATE_RESPONSE_OLD = [
+ {"alias": "f869e886-4262-42de-87a6-8f99fc3e6272",
+ "subjectDN": "CN=test.example.com, O=Internet Widgits Pty Ltd, ST=Florida, C=AU",
+ "issuerDN": "O=Internet Widgits Pty Ltd, L=Palm City, ST=Florida, C=AU",
+ "start": "2019-04-01T19:30:07.000+0000", "expire": "2029-03-29T19:30:07.000+0000", "isUserInstalled": True},
+ {"alias": "ca2", "subjectDN": "sdn2", "issuerDN": "idn2",
+ "start": "2019-04-02T13:07:30.516Z", "expire": "2019-04-02T13:07:30.516Z", "isUserInstalled": False},
+ {"alias": "ca3", "subjectDN": "sdn3", "issuerDN": "idn3",
+ "start": "2019-04-02T13:07:30.516Z", "expire": "2019-04-02T13:07:30.516Z", "isUserInstalled": False},
+ {"alias": "ca4", "subjectDN": "sdn4", "issuerDN": "idn4",
+ "start": "2019-04-02T13:07:30.516Z", "expire": "2019-04-02T13:07:30.516Z", "isUserInstalled": False}]
+ GET_CERTIFICATE_RESPONSE = [
+ {'alias': 'alias1', 'expire': '2019-04-02T13:46:04.285Z', 'isKeyEntry': True, 'isUserInstalled': True,
+ 'issuerDN': 'string', 'issuerRdns': [{'attributes': [{'name': 'string', 'value': 'string'}]}],
+ 'sha256Fingerprint': b'4cb68a8039a54b2f5fbe4c55dabb92464a0149a9fce64eb779fd3211c482e44e',
+ 'shaFingerprint': b'4cb68a8039a54b2f5fbe4c55dabb92464a0149a9fce64eb779fd3211c482e44e',
+ 'start': '2019-04-02T13:46:04.285Z', 'status': 'trusted', 'subjectDN': 'string',
+ 'subjectRdns': [{'attributes': [{'name': 'string', 'value': 'string'}]}], 'truststore': True, 'type': 'selfSigned'},
+ {"alias": "alias1", "shaFingerprint": CERTIFICATE_FINGERPRINT, "sha256Fingerprint": CERTIFICATE_FINGERPRINT,
+ "subjectDN": "string", "subjectRdns": [{"attributes": [{"name": "string", "value": "string"}]}],
+ "issuerDN": "string", "issuerRdns": [{"attributes": [{"name": "string", "value": "string"}]}],
+ "start": "2019-04-02T13:46:04.285Z", "expire": "2019-04-02T13:46:04.285Z", "status": "trusted",
+ "truststore": True, "isUserInstalled": True, "isKeyEntry": True, "type": "selfSigned"},
+ {"alias": "alias1", "shaFingerprint": "123412341234", "sha256Fingerprint": "4567345673456",
+ "subjectDN": "string", "subjectRdns": [{"attributes": [{"name": "string", "value": "string"}]}],
+ "issuerDN": "string", "issuerRdns": [{"attributes": [{"name": "string", "value": "string"}]}],
+ "start": "2019-04-02T13:46:04.285Z", "expire": "2019-04-02T13:46:04.285Z", "status": "trusted",
+ "truststore": True, "isUserInstalled": True, "isKeyEntry": True, "type": "selfSigned"}
+ ]
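+    # Assumed fixture intent (inferred from the side_effect patterns below, not from the module
+    # source): GET_CERTIFICATE_RESPONSE models the current certificates endpoint, while
+    # GET_CERTIFICATE_RESPONSE_OLD models the legacy payload returned after the first request
+    # answers 404 in the determine_changes/upload/delete tests.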
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ if not os.path.exists(self.CERTIFICATE_PATH):
+ with open(self.CERTIFICATE_PATH, "w") as fh:
+ fh.write(self.CERTIFICATE_CONTENT)
+
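+    # NOTE (inferred from the mocked payloads, not from the module_utils source): each test below
+    # feeds BASE_REQUEST_FUNC two responses because constructing the module issues two
+    # web-services requests: a version/about lookup followed by a runningAsProxy query.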
+ def test_init_url_path_prefix(self):
+ """Verify url path prefix for both embedded and proxy scenarios."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ self.assertEquals(certificate.url_path_prefix, "")
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": True})]):
+ certificate = NetAppESeriesClientCertificate()
+ self.assertEquals(certificate.url_path_prefix, "storage-systems/1/forward/devmgr/v2/")
+
+ self._set_args({"ssid": "0", "certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": True})]):
+ certificate = NetAppESeriesClientCertificate()
+ self.assertEquals(certificate.url_path_prefix, "")
+
+ self._set_args({"ssid": "PROXY", "certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": True})]):
+ certificate = NetAppESeriesClientCertificate()
+ self.assertEquals(certificate.url_path_prefix, "")
+
+ def test_certificate_info_pass(self):
+ """Determine whether certificate_info returns expected results."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ self.assertEquals(certificate.certificate_info(self.CERTIFICATE_PATH),
+ {"start_date": datetime.datetime(2019, 4, 1, 19, 30, 7),
+ "expire_date": datetime.datetime(2029, 3, 29, 19, 30, 7),
+ "subject_dn": ["AU", "Florida", "Internet Widgits Pty Ltd", "test.example.com"],
+ "issuer_dn": ["AU", "Florida", "Palm City", "Internet Widgits Pty Ltd"]})
+
+ def test_certificate_info_fail(self):
+ """Determine wehther certificate_info throws expected exceptions."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to load certificate."):
+ with mock.patch(self.LOAD_PEM_X509_CERTIFICATE, side_effect=Exception()):
+ with mock.patch(self.LOAD_DER_X509_CERTIFICATE, side_effect=Exception()):
+ certificate.certificate_info(self.CERTIFICATE_PATH)
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to open certificate file or invalid certificate object type."):
+ with mock.patch(self.LOAD_PEM_X509_CERTIFICATE, return_value=None):
+ certificate.certificate_info(self.CERTIFICATE_PATH)
+
+ def test_certificate_fingerprint_pass(self):
+ """Determine whether certificate_fingerprint returns expected results."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ self.assertEquals(certificate.certificate_fingerprint(self.CERTIFICATE_PATH), "4cb68a8039a54b2f5fbe4c55dabb92464a0149a9fce64eb779fd3211c482e44e")
+
+ def test_certificate_fingerprint_fail(self):
+ """Determine whether certificate_fingerprint throws expected exceptions."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to determine certificate fingerprint."):
+ with mock.patch(self.LOAD_PEM_X509_CERTIFICATE, side_effect=Exception()):
+ with mock.patch(self.LOAD_DER_X509_CERTIFICATE, side_effect=Exception()):
+ certificate.certificate_fingerprint(self.CERTIFICATE_PATH)
+
+ def test_determine_changes_pass(self):
+ """Determine whether determine_changes successful return expected results."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, self.GET_CERTIFICATE_RESPONSE)):
+ certificate.determine_changes()
+ self.assertEquals(certificate.add_certificates, ["certificate.crt"])
+ # self.assertEquals(certificate.remove_certificates, [])
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (200, self.GET_CERTIFICATE_RESPONSE_OLD)]):
+ certificate.determine_changes()
+ self.assertEquals(certificate.add_certificates, [])
+ # self.assertEquals(certificate.remove_certificates, [])
+
+ self._set_args({"certificates": []})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (200, self.GET_CERTIFICATE_RESPONSE_OLD)]):
+ certificate.determine_changes()
+ self.assertEquals(certificate.add_certificates, [])
+ self.assertEquals(certificate.remove_certificates, [self.GET_CERTIFICATE_RESPONSE_OLD[0]])
+
+ def test_determine_changes_fail(self):
+ """Determine whether determine_changes throws expected exceptions."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve remote server certificates."):
+ with mock.patch(self.REQUEST_FUNC, return_value=(300, [])):
+ certificate.determine_changes()
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve remote server certificates."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (300, [])]):
+ certificate.determine_changes()
+
+ def test_upload_certificate_pass(self):
+ """Validate upload_certificate successfully completes"""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, [])):
+ certificate.upload_certificate(self.CERTIFICATE_PATH)
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (200, [])]):
+ certificate.upload_certificate(self.CERTIFICATE_PATH)
+
+ def test_upload_certificate_fail(self):
+ """Validate upload_certificate successfully completes"""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to upload certificate."):
+ with mock.patch(self.REQUEST_FUNC, return_value=(300, [])):
+ certificate.upload_certificate(self.CERTIFICATE_PATH)
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to upload certificate."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (300, [])]):
+ certificate.upload_certificate(self.CERTIFICATE_PATH)
+
+ def test_delete_certificate_pass(self):
+ """Validate delete_certificate successfully completes"""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, [])):
+ certificate.delete_certificate({"alias": "alias1"})
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (200, [])]):
+ certificate.delete_certificate({"alias": "alias1"})
+
+ def test_delete_certificate_fail(self):
+ """Validate delete_certificate successfully completes"""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete certificate."):
+ with mock.patch(self.REQUEST_FUNC, return_value=(300, [])):
+ certificate.delete_certificate({"alias": "alias1"})
+
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete certificate."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(404, None), (300, [])]):
+ certificate.delete_certificate({"alias": "alias1"})
+
+ def test_apply_pass(self):
+ """Verify apply functions as expected."""
+ self._set_args({"certificates": [self.CERTIFICATE_PATH]})
+ with mock.patch(self.BASE_REQUEST_FUNC, side_effect=[(200, {"version": "03.00.0000.0000"}), (200, {"runningAsProxy": False})]):
+ certificate = NetAppESeriesClientCertificate()
+ certificate.determine_changes = lambda: None
+ certificate.delete_certificate = lambda x: None
+ certificate.upload_certificate = lambda x: None
+
+ certificate.remove_certificates = []
+ certificate.add_certificates = []
+ certificate.module.check_mode = False
+ with self.assertRaises(AnsibleExitJson):
+ certificate.apply()
+
+ certificate.remove_certificates = []
+ certificate.add_certificates = []
+ certificate.module.check_mode = True
+ with self.assertRaises(AnsibleExitJson):
+ certificate.apply()
+
+ certificate.remove_certificates = [True]
+ certificate.add_certificates = []
+ certificate.module.check_mode = False
+ with self.assertRaises(AnsibleExitJson):
+ certificate.apply()
+
+ certificate.remove_certificates = []
+ certificate.add_certificates = [True]
+ certificate.module.check_mode = False
+ with self.assertRaises(AnsibleExitJson):
+ certificate.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_discover.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_discover.py
new file mode 100644
index 000000000..5dc390ede
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_discover.py
@@ -0,0 +1,168 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_discover import NetAppESeriesDiscover
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class DiscoverTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"subnet_mask": "192.168.1.0/24"}
+ BASE_REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_discover.request'
+ SLEEP_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_discover.sleep'
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_valid_options_pass(self):
+ """Verify constructor accepts valid options."""
+ options_list = [{"ports": [1, 8443]},
+ {"ports": [8080, 65535]},
+ {"ports": [8443], "proxy_url": "https://192.168.1.1:8443/devmgr/v2/", "proxy_username": "admin", "proxy_password": "adminpass"},
+ {"ports": [8443], "proxy_url": "https://192.168.1.1:8443/devmgr/v2/", "proxy_username": "admin", "proxy_password": "adminpass",
+ "prefer_embedded": True},
+ {"ports": [8443], "proxy_url": "https://192.168.1.1:8443/devmgr/v2/", "proxy_username": "admin", "proxy_password": "adminpass",
+ "prefer_embedded": False},
+ {"ports": [8443], "proxy_url": "https://192.168.1.1:8443/devmgr/v2/", "proxy_username": "admin", "proxy_password": "adminpass",
+ "proxy_validate_certs": True},
+ {"ports": [8443], "proxy_url": "https://192.168.1.1:8443/devmgr/v2/", "proxy_username": "admin", "proxy_password": "adminpass",
+ "proxy_validate_certs": False}]
+
+ for options in options_list:
+ self._set_args(options)
+ discover = NetAppESeriesDiscover()
+
+ def test_valid_options_fail(self):
+ """Verify constructor throws expected exceptions."""
+ options_list = [{"ports": [0, 8443]}, {"ports": [8080, 65536]}, {"ports": [8080, "port"]}, {"ports": [8080, -10]}, {"ports": [8080, 70000]}]
+
+ for options in options_list:
+ self._set_args(options)
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid port! Ports must be positive numbers between 0 and 65536."):
+ discover = NetAppESeriesDiscover()
+
+ def test_check_ip_address_pass(self):
+ """Verify check_ip_address successfully completes."""
+ self._set_args()
+ with mock.patch(self.BASE_REQ_FUNC, return_value=(200, {"chassisSerialNumber": "012345678901", "storageArrayLabel": "array_label"})):
+ discover = NetAppESeriesDiscover()
+ discover.check_ip_address(discover.systems_found, "192.168.1.100")
+ self.assertEqual(discover.systems_found, {"012345678901": {"api_urls": ["https://192.168.1.100:8443/devmgr/v2/storage-systems/1/"],
+ "label": "array_label", "addresses": [], "proxy_required": False}})
+
+ self._set_args({"ports": [8080, 8443]})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(404, None), (401, None), (200, {"sa": {"saData": {"chassisSerialNumber": "012345678901",
+ "storageArrayLabel": "array_label"}}})]):
+ discover = NetAppESeriesDiscover()
+ discover.check_ip_address(discover.systems_found, "192.168.1.101")
+ self.assertEqual(discover.systems_found, {"012345678901": {"api_urls": ["https://192.168.1.101:8443/devmgr/v2/storage-systems/1/"],
+ "label": "array_label", "addresses": [], "proxy_required": False}})
+
+ def test_no_proxy_discover_pass(self):
+ """Verify no_proxy_discover completes successfully."""
+ self._set_args()
+ discover = NetAppESeriesDiscover()
+        discover.check_ip_address = lambda *args: None
+ discover.no_proxy_discover()
+
+ def test_verify_proxy_service_pass(self):
+ """Verify verify_proxy_service completes successfully."""
+ self._set_args({"proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ with mock.patch(self.BASE_REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
+ discover.verify_proxy_service()
+
+ def test_verify_proxy_service_fail(self):
+ """Verify verify_proxy_service throws expected exception."""
+ self._set_args({"proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Web Services is not running as a proxy!"):
+ with mock.patch(self.BASE_REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
+ discover.verify_proxy_service()
+
+ self._set_args({"proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Proxy is not available! Check proxy_url."):
+ with mock.patch(self.BASE_REQ_FUNC, return_value=Exception()):
+ discover.verify_proxy_service()
+
+ def test_test_systems_found_pass(self):
+ """Verify test_systems_found adds to systems_found dictionary."""
+ self._set_args({"proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass", "prefer_embedded": True})
+ discover = NetAppESeriesDiscover()
+ with mock.patch(self.BASE_REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
+ discover.test_systems_found(discover.systems_found, "012345678901", "array_label", ["192.168.1.100", "192.168.1.102"])
+ self.assertEqual(discover.systems_found, {"012345678901": {"api_urls": ["https://192.168.1.100:8443/devmgr/v2/",
+ "https://192.168.1.102:8443/devmgr/v2/"],
+ "label": "array_label",
+ "addresses": ["192.168.1.100", "192.168.1.102"],
+ "proxy_required": False}})
+
+ def test_proxy_discover_pass(self):
+ """Verify proxy_discover completes successfully."""
+ self._set_args({"subnet_mask": "192.168.1.0/30", "proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ discover.verify_proxy_service = lambda: None
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"requestId": "1"}), (200, {"discoverProcessRunning": True}),
+ (200, {"discoverProcessRunning": False,
+ "storageSystems": [{"controllers": [{"ipAddresses": ["192.168.1.100", "192.168.1.102"]}],
+ "supportedManagementPorts": ["https"], "serialNumber": "012345678901",
+ "label": "array_label"}]})]):
+ discover.proxy_discover()
+
+ self._set_args({"subnet_mask": "192.168.1.0/30", "proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ discover.verify_proxy_service = lambda: None
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"requestId": "1"}), (200, {"discoverProcessRunning": True}),
+ (200, {"discoverProcessRunning": False,
+ "storageSystems": [{"controllers": [{"ipAddresses": ["192.168.1.100", "192.168.1.102"]}],
+ "supportedManagementPorts": [], "serialNumber": "012345678901",
+ "label": "array_label"}]})]):
+ discover.proxy_discover()
+
+ def test_proxy_discover_fail(self):
+ """Verify proxy_discover throws expected exceptions."""
+ self._set_args({"subnet_mask": "192.168.1.0/30", "proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ discover.verify_proxy_service = lambda: None
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to initiate array discovery."):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.BASE_REQ_FUNC, return_value=Exception()):
+ discover.proxy_discover()
+
+ self._set_args({"subnet_mask": "192.168.1.0/30", "proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ discover.verify_proxy_service = lambda: None
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get the discovery results."):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"requestId": "1"}), Exception()]):
+ discover.proxy_discover()
+
+ self._set_args({"subnet_mask": "192.168.1.0/30", "proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ discover.verify_proxy_service = lambda: None
+ with self.assertRaisesRegexp(AnsibleFailJson, "Timeout waiting for array discovery process."):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"requestId": "1"})] + [(200, {"discoverProcessRunning": True})] * 300):
+ discover.proxy_discover()
+
+ def test_discover_pass(self):
+ """Verify discover successfully completes."""
+ self._set_args({"subnet_mask": "192.168.1.0/30", "proxy_url": "https://192.168.1.200", "proxy_username": "admin", "proxy_password": "adminpass"})
+ discover = NetAppESeriesDiscover()
+ discover.proxy_discover = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "Discover process complete."):
+ discover.discover()
+
+ self._set_args()
+ discover = NetAppESeriesDiscover()
+ discover.no_proxy_discover = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, "Discover process complete."):
+ discover.discover()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_drive_firmware.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_drive_firmware.py
new file mode 100644
index 000000000..b59bd0acd
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_drive_firmware.py
@@ -0,0 +1,212 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_drive_firmware import NetAppESeriesDriveFirmware
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class DriveFirmwareTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1"}
+
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_drive_firmware.NetAppESeriesDriveFirmware.request"
+ CREATE_MULTIPART_FORMDATA_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_drive_firmware.create_multipart_formdata"
+ SLEEP_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_drive_firmware.sleep"
+ UPGRADE_LIST_RESPONSE = ({"filename": "test_drive_firmware_1",
+ "driveRefList": ["010000005000C5007EDE4ECF0000000000000000",
+ "010000005000C5007EDF9AAB0000000000000000",
+ "010000005000C5007EDBE3C70000000000000000"]},
+ {"filename": "test_drive_firmware_2",
+ "driveRefList": ["010000005000C5007EDE4ECF0000000000000001",
+ "010000005000C5007EDF9AAB0000000000000001",
+ "010000005000C5007EDBE3C70000000000000001"]})
+
+ FIRMWARE_DRIVES_RESPONSE = {"compatibilities": [
+ {"filename": "test_drive_firmware_1",
+ "firmwareVersion": "MS02",
+ "supportedFirmwareVersions": ["MSB6", "MSB8", "MS00", "MS02"],
+ "compatibleDrives": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "onlineUpgradeCapable": True},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "onlineUpgradeCapable": True},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "onlineUpgradeCapable": True}]},
+ {"filename": "test_drive_firmware_2",
+ "firmwareVersion": "MS01",
+ "supportedFirmwareVersions": ["MSB8", "MS00", "MS01"],
+ "compatibleDrives": [{"driveRef": "010000005000C5007EDE4ECF0000000000000001", "onlineUpgradeCapable": True},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "onlineUpgradeCapable": False},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "onlineUpgradeCapable": True}]}]}
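+    # Fixture relationship (inferred from the tests below): FIRMWARE_DRIVES_RESPONSE mocks the
+    # compatibility query, and upgrade_list() is expected to keep only compatible drives whose
+    # mocked current firmwareVersion is older than the file's (see test_upgrade_list_pass), while
+    # UPGRADE_LIST_RESPONSE stands in for upgrade_list() output in the wait/upgrade tests.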
+
+ def _set_args(self, args):
+ module_args = self.REQUIRED_PARAMS.copy()
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_upload_firmware(self):
+ """Verify exception is thrown"""
+ self._set_args({"firmware": ["path_to_test_drive_firmware_1", "path_to_test_drive_firmware_2"]})
+ firmware_object = NetAppESeriesDriveFirmware()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to upload drive firmware"):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ with mock.patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ firmware_object.upload_firmware()
+
+ def test_upgrade_list_pass(self):
+ """Verify upgrade_list method pass"""
+ side_effects = [(200, self.FIRMWARE_DRIVES_RESPONSE),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS00"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS01"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS02"})]
+ self._set_args({"firmware": ["path/to/test_drive_firmware_1"]})
+ firmware_object = NetAppESeriesDriveFirmware()
+ with mock.patch(self.REQUEST_FUNC, side_effect=side_effects):
+ self.assertEqual(firmware_object.upgrade_list(), [{"driveRefList": ["010000005000C5007EDE4ECF0000000000000000",
+ "010000005000C5007EDF9AAB0000000000000000"],
+ "filename": "test_drive_firmware_1"}])
+
+ side_effects = [(200, self.FIRMWARE_DRIVES_RESPONSE),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS02"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS02"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS02"})]
+ self._set_args({"firmware": ["path/to/test_drive_firmware_1"]})
+ firmware_object = NetAppESeriesDriveFirmware()
+ with mock.patch(self.REQUEST_FUNC, side_effect=side_effects):
+ self.assertEqual(firmware_object.upgrade_list(), [])
+
+ def test_upgrade_list_fail(self):
+ """Verify upgrade_list method throws expected exceptions."""
+ self._set_args({"firmware": ["path_to_test_drive_firmware_1"]})
+ firmware_object = NetAppESeriesDriveFirmware()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to complete compatibility and health check."):
+            with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware_object.upgrade_list()
+
+ side_effects = [(200, self.FIRMWARE_DRIVES_RESPONSE),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS01"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS00"}),
+ Exception()]
+ self._set_args({"firmware": ["path/to/test_drive_firmware_1"]})
+ firmware_object = NetAppESeriesDriveFirmware()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve drive information."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=side_effects):
+ firmware_object.upgrade_list()
+
+ side_effects = [(200, self.FIRMWARE_DRIVES_RESPONSE),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS01"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS00"}),
+ (200, {"offline": False, "available": True, "firmwareVersion": "MS00"})]
+ self._set_args({"firmware": ["path/to/test_drive_firmware_2"], "upgrade_drives_online": True})
+ firmware_object = NetAppESeriesDriveFirmware()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Drive is not capable of online upgrade."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=side_effects):
+ firmware_object.upgrade_list()
+
+ def test_wait_for_upgrade_completion_pass(self):
+ """Verify function waits for okay status."""
+ self._set_args({"firmware": ["path/to/test_drive_firmware_1", "path/to/test_drive_firmware_2"], "wait_for_completion": True})
+ firmware_object = NetAppESeriesDriveFirmware()
+ firmware_object.upgrade_drives_online = True
+ firmware_object.upgrade_list = lambda: self.UPGRADE_LIST_RESPONSE
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[
+ (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "inProgress"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]}),
+ (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "inProgressRecon"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]}),
+ (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "pending"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]}),
+ (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "notAttempted"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]}),
+ (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]})]):
+ firmware_object.wait_for_upgrade_completion()
+
+ def test_wait_for_upgrade_completion_fail(self):
+ """Verify wait for upgrade completion exceptions."""
+ self._set_args({"firmware": ["path/to/test_drive_firmware_1", "path/to/test_drive_firmware_2"], "wait_for_completion": True})
+ firmware_object = NetAppESeriesDriveFirmware()
+ firmware_object.upgrade_drives_online = True
+ firmware_object.upgrade_list = lambda: self.UPGRADE_LIST_RESPONSE
+ firmware_object.WAIT_TIMEOUT_SEC = 5
+ response = (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "inProgress"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "inProgressRecon"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "pending"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "notAttempted"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]})
+ with self.assertRaisesRegexp(AnsibleFailJson, "Timed out waiting for drive firmware upgrade."):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=response):
+ firmware_object.wait_for_upgrade_completion()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve drive status."):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware_object.wait_for_upgrade_completion()
+
+ response = (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "_UNDEFINED"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "inProgressRecon"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "pending"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "notAttempted"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]})
+ with self.assertRaisesRegexp(AnsibleFailJson, "Drive firmware upgrade failed."):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=response):
+ firmware_object.wait_for_upgrade_completion()
+
+ def test_upgrade_pass(self):
+ """Verify upgrade upgrade in progress variable properly reports."""
+ self._set_args({"firmware": ["path/to/test_drive_firmware_1", "path/to/test_drive_firmware_2"], "wait_for_completion": False})
+ firmware_object = NetAppESeriesDriveFirmware()
+ firmware_object.upgrade_drives_online = True
+ firmware_object.upgrade_list = lambda: {}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ firmware_object.upgrade()
+ self.assertTrue(firmware_object.upgrade_in_progress)
+
+ self._set_args({"firmware": ["path_to_test_drive_firmware_1", "path_to_test_drive_firmware_2"], "wait_for_completion": True})
+ firmware_object = NetAppESeriesDriveFirmware()
+ firmware_object.upgrade_drives_online = True
+ firmware_object.upgrade_list = lambda: self.UPGRADE_LIST_RESPONSE
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, {}),
+ (200, {"driveStatus": [{"driveRef": "010000005000C5007EDE4ECF0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000000", "status": "okay"},
+ {"driveRef": "010000005000C5007EDE4ECF0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDF9AAB0000000000000001", "status": "okay"},
+ {"driveRef": "010000005000C5007EDBE3C70000000000000001", "status": "okay"}]})]):
+ firmware_object.upgrade()
+ self.assertFalse(firmware_object.upgrade_in_progress)
+
+ def test_upgrade_fail(self):
+ """Verify upgrade method exceptions."""
+ self._set_args({"firmware": ["path_to_test_drive_firmware_1", "path_to_test_drive_firmware_2"]})
+ firmware_object = NetAppESeriesDriveFirmware()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to upgrade drive firmware."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware_object.upgrade()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_facts.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_facts.py
new file mode 100644
index 000000000..d3d094278
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_facts.py
@@ -0,0 +1,470 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_facts import Facts
+from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class FactsTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ 'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1'
+ }
+ REQUEST_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_facts.Facts.request'
+ GET_CONTROLLERS_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_facts.Facts.get_controllers'
+ WORKLOAD_RESPONSE = [{"id": "4200000001000000000000000000000000000000", "name": "beegfs_metadata",
+ "workloadAttributes": [{"key": "profileId", "value": "ansible_workload_1"}]},
+ {"id": "4200000002000000000000000000000000000000", "name": "other_workload_1",
+ "workloadAttributes": [{"key": "profileId", "value": "Other_1"}]}]
+ GRAPH_RESPONSE = {
+ "sa": {"saData": {"storageArrayLabel": "ictm0718s01c1", "saId": {"worldWideName": "600A098000A4B28D000000005CF10481"}, "fwVersion": "08.42.30.05",
+ "chassisSerialNumber": "021633035190"},
+ "featureParameters": {"cacheBlockSizes": [4096, 8192, 16384, 32768],
+ "supportedSegSizes": [32768, 65536, 131072, 262144, 524288, 495616, 655360, 1982464]},
+ "capabilities": ["autoCodeSync", "autoLunTransfer", "subLunsAllowed", "stagedDownload", "mixedDriveTypes", "bundleMigration", "raid6",
+ "performanceTier", "secureVolume", "protectionInformation", "ssdSupport", "driveSlotLimit", "flashReadCache",
+ "storagePoolsType2", "totalNumberOfArvmMirrorsPerArray", "totalNumberOfPitsPerArray", "totalNumberOfThinVolumesPerArray"],
+ "premiumFeatures": [],
+ "hostSpecificVals": [{"hostType": "FactoryDefault", "index": 0}, {"hostType": "W2KNETNCL", "index": 1}, {"hostPortType": "W2KNETCL", "index": 8},
+ {"hostType": "LnxTPGSALUA_SF", "index": 27}, {"hostType": "LnxDHALUA", "index": 28}]}, "controller": [
+ {"active": True, "quiesced": False, "status": "optimal", "controllerRef": "070000000000000000000001",
+ "physicalLocation": {"trayRef": "0E00000000000000000000000000000000000000", "slot": 1,
+ "locationParent": {"refType": "generic", "controllerRef": None, "symbolRef": "0000000000000000000000000000000000000000",
+ "typedReference": None}, "locationPosition": 1, "label": "A"}, "manufacturer": "NETAPP ",
+ "manufacturerDate": "1474675200", "appVersion": "08.42.30.05", "bootVersion": "08.42.30.05", "productID": "INF-01-00 ",
+ "productRevLevel": "0842", "serialNumber": "021619039162 ", "boardID": "2806", "cacheMemorySize": 3328, "processorMemorySize": 1278,
+ "hostInterfaces": [{"interfaceType": "iscsi", "fibre": None, "ib": None,
+ "iscsi": {"channel": 1, "channelPortRef": "1F00010001010000000000000000000000000000", "tcpListenPort": 3260,
+ "ipv4Enabled": True, "ipv4Data": {"ipv4Address": "0.0.0.0", "ipv4AddressConfigMethod": "configStatic",
+ "ipv4OutboundPacketPriority": {"isEnabled": False, "value": 1},
+ "ipv4VlanId": {"isEnabled": False, "value": 1},
+ "ipv4AddressData": {"configState": "configured", "ipv4Address": "10.10.11.110",
+ "ipv4SubnetMask": "255.255.255.0",
+ "ipv4GatewayAddress": "0.0.0.0"}},
+ "interfaceData": {"type": "ethernet", "ethernetData": {
+ "partData": {"vendorName": "QLogic Corporation", "partNumber": "83xx", "revisionNumber": "5.5.31.511",
+ "serialNumber": "00a098a4b28f"}, "macAddress": "00A098A4B293", "fullDuplex": True,
+ "maximumFramePayloadSize": 9000, "currentInterfaceSpeed": "speed10gig", "maximumInterfaceSpeed": "speed10gig",
+ "linkStatus": "up", "supportedInterfaceSpeeds": ["speed1gig", "speed10gig"], "autoconfigSupport": False,
+ "copperCableDiagnosticsSupport": False}, "infinibandData": None},
+ "interfaceRef": "2201020000000000000000000000000000000000", "ipv6Enabled": True,
+ "ipv6Data": {"ipv6LocalAddresses": [
+ {"address": "FE8000000000000002A098FFFEA4B293",
+ "addressState": {"addressType": "typeInterface", "interfaceAddressState": "configured",
+ "routerAddressState": "__UNDEFINED"}}], "ipv6RoutableAddresses": [
+ {"address": "00000000000000000000000000000000",
+ "addressState": {"addressType": "typeInterface", "interfaceAddressState": "unconfigured",
+ "routerAddressState": "__UNDEFINED"}},
+ {"address": "00000000000000000000000000000000",
+ "addressState": {"addressType": "typeInterface", "interfaceAddressState": "unconfigured",
+ "routerAddressState": "__UNDEFINED"}}],
+ "ipv6PortRouterAddress": {"address": "00000000000000000000000000000000",
+ "addressState": {"addressType": "typeRouter", "interfaceAddressState": "__UNDEFINED",
+ "routerAddressState": "unknown"}},
+ "ipv6AddressConfigMethod": "configStateless", "ipv6OutboundPacketPriority": {"isEnabled": False, "value": 1},
+ "ipv6VlanId": {"isEnabled": False, "value": 1}, "ipv6HopLimit": 64, "ipv6NdReachableTime": 30000,
+ "ipv6NdRetransmitTime": 1000, "ipv6NdStaleTimeout": 30000, "ipv6DuplicateAddressDetectionAttempts": 1},
+ "physicalLocation": {"trayRef": "0000000000000000000000000000000000000000", "slot": 0,
+ "locationParent": {"refType": "generic", "controllerRef": None,
+ "symbolRef": "0000000000000000000000000000000000000000",
+ "typedReference": None}, "locationPosition": 0, "label": ""},
+ "protectionInformationCapable": True, "isIPv6Capable": True, "oneWayMaxRate": "1230000000",
+ "bidirectionalMaxRate": "2120000000", "iqn": "iqn.1992-08.com.netapp:2806.600a098000a4b28d000000005cf10481",
+ "controllerId": "070000000000000000000001",
+ "addressId": "iqn.1992-08.com.netapp:2806.600a098000a4b28d000000005cf10481",
+ "niceAddressId": "iqn.1992-08.com.netapp:2806.600a098000a4b28d000000005cf10481",
+ "interfaceId": "2201020000000000000000000000000000000000", "id": "2201020000000000000000000000000000000000"},
+ "sas": None, "sata": None, "scsi": None}],
+ "driveInterfaces": [
+ {"interfaceType": "sas", "fibre": None, "ib": None, "iscsi": None,
+ "sas": {"channel": 1, "currentInterfaceSpeed": "speed12gig", "maximumInterfaceSpeed": "speed12gig", "part": "LSISAS3008",
+ "revision": 172688896, "isDegraded": False,
+ "iocPort": {
+ "parent": {"type": "controller", "controller": "070000000000000000000001", "drive": None, "expander": None, "hostBoardRef": None},
+ "attachedDevice": {"channel": 1, "channelType": "driveside",
+ "sasAttachedDeviceData": {"type": "expander", "alternateController": None, "drive": None,
+ "expander": "2000000000000000000000630001000000000000",
+ "remoteHostPortAddress": None,
+ "localController": None, "physicalLocation": None}}, "state": "optimal",
+ "miswireType": "None", "channelPortRef": "1F01000001010000000000000000000000000000",
+ "sasPhys": [{"phyIdentifier": 4, "isOperational": True}, {"phyIdentifier": 5, "isOperational": True},
+ {"phyIdentifier": 6, "isOperational": True}, {"phyIdentifier": 7, "isOperational": True}],
+ "portTypeData": {"portType": "endDevice", "portIdentifier": "500A098A4B28D004", "routingType": "__UNDEFINED"},
+ "portMode": "internal",
+ "domainNumber": 1, "attachedChannelPortRef": "0000000000000000000000000000000000000000", "discoveryStatus": 0},
+ "interfaceRef": "2201000000000000000000000000000000000000",
+ "physicalLocation": {"trayRef": "0000000000000000000000000000000000000000", "slot": 0,
+ "locationParent": {"refType": "generic", "controllerRef": None,
+ "symbolRef": "0000000000000000000000000000000000000000", "typedReference": None},
+ "locationPosition": 0, "label": ""}, "protectionInformationCapable": True, "oneWayMaxRate": "4400000000",
+ "bidirectionalMaxRate": "8400000000", "controllerId": None, "addressId": "500A098A4B28D004", "niceAddressId": "500A098A4B28D004",
+ "interfaceId": "2201000000000000000000000000000000000000", "basePortAddress": "500A098A4B28D00",
+ "id": "2201000000000000000000000000000000000000"}, "sata": None, "scsi": None}],
+ "netInterfaces": [{"interfaceType": "ethernet",
+ "ethernet": {"interfaceName": "wan0", "channel": 1, "speed": 1000, "ip": 175178176, "alias": "ictm0718s01c1-a",
+ "macAddr": "00A098A4B28D", "gatewayIp": 175177985, "subnetMask": -256, "bootpUsed": False, "rloginEnabled": True,
+ "reserved1": "0000000000000000", "setupError": False, "reserved2": "",
+ "interfaceRef": "2800070000000000000000000001000000000000", "linkStatus": "up", "ipv4Enabled": True,
+ "ipv4Address": "10.113.1.192", "ipv4SubnetMask": "255.255.255.0", "ipv4AddressConfigMethod": "configStatic",
+ "ipv6Enabled": False, "ipv6LocalAddress": {"address": "00000000000000000000000000000000",
+ "addressState": {"addressType": "typeInterface",
+ "interfaceAddressState": "configured",
+ "routerAddressState": "__UNDEFINED"}},
+ "ipv6PortStaticRoutableAddress": {"address": "00000000000000000000000000000000",
+ "addressState": {"addressType": "typeInterface",
+ "interfaceAddressState": "__UNDEFINED",
+ "routerAddressState": "__UNDEFINED"}},
+ "ipv6PortRoutableAddresses": [], "ipv6AddressConfigMethod": "configStatic", "fullDuplex": True,
+ "supportedSpeedSettings": ["speedAutoNegotiated", "speed10MbitHalfDuplex", "speed10MbitFullDuplex",
+ "speed100MbitHalfDuplex", "speed100MbitFullDuplex", "speed1000MbitFullDuplex"],
+ "configuredSpeedSetting": "speedAutoNegotiated", "currentSpeed": "speed1gig",
+ "physicalLocation": {"trayRef": "0E00000000000000000000000000000000000000", "slot": 0,
+ "locationParent": {"refType": "controller", "controllerRef": "070000000000000000000001",
+ "symbolRef": None, "typedReference": None}, "locationPosition": 1,
+ "label": "P1"}, "ipv4GatewayAddress": "10.113.1.1",
+ "controllerRef": "070000000000000000000001", "controllerSlot": 1,
+ "dnsProperties": {
+ "acquisitionProperties": {"dnsAcquisitionType": "stat",
+ "dnsServers": [
+ {"addressType": "ipv4", "ipv4Address": "10.193.0.250", "ipv6Address": None},
+ {"addressType": "ipv4", "ipv4Address": "10.192.0.250", "ipv6Address": None}]},
+ "dhcpAcquiredDnsServers": []},
+ "ntpProperties": {
+ "acquisitionProperties": {"ntpAcquisitionType": "stat", "ntpServers": [
+ {"addrType": "ipvx", "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "216.239.35.0", "ipv6Address": None}},
+ {"addrType": "ipvx", "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "216.239.35.4", "ipv6Address": None}}]},
+ "dhcpAcquiredNtpServers": []},
+ "id": "2800070000000000000000000001000000000000"}}],
+ "inventory": [], "reserved1": "000000000000000000000000", "reserved2": "", "hostBoardID": "None", "physicalCacheMemorySize": 4864,
+ "readyToRemove": False, "boardSubmodelID": "319", "submodelSupported": True, "oemPartNumber": "E2800A-8GB", "partNumber": "111-02829+C0 ",
+ "rtrAttributes": {"cruType": "dedicated", "parentCru": None, "rtrAttributeData": {"hasReadyToRemoveIndicator": False, "readyToRemove": False}},
+ "bootTime": "1563988406", "modelName": "2806",
+ "networkSettings": {"ipv4DefaultRouterAddress": "10.113.1.1",
+ "ipv6DefaultRouterAddress": {"address": "00000000000000000000000000000000",
+ "addressState": {"addressType": "typeInterface",
+ "interfaceAddressState": "__UNDEFINED", "routerAddressState": "__UNDEFINED"}},
+ "ipv6CandidateDefaultRouterAddresses": [],
+ "remoteAccessEnabled": True,
+ "dnsProperties": {"acquisitionProperties": {"dnsAcquisitionType": "stat",
+ "dnsServers": [
+ {"addressType": "ipv4", "ipv4Address": "10.193.0.250", "ipv6Address": None},
+ {"addressType": "ipv4", "ipv4Address": "10.192.0.250", "ipv6Address": None}]},
+ "dhcpAcquiredDnsServers": []},
+ "ntpProperties": {
+ "acquisitionProperties": {
+ "ntpAcquisitionType": "stat", "ntpServers": [
+ {"addrType": "ipvx", "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "216.239.35.0", "ipv6Address": None}},
+ {"addrType": "ipvx", "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "216.239.35.4", "ipv6Address": None}}]},
+ "dhcpAcquiredNtpServers": []}},
+ "repairPolicy": {"removalData": {"removalMethod": "__UNDEFINED", "rtrAttributes": None}, "replacementMethod": "__UNDEFINED"},
+ "flashCacheMemorySize": 419430400, "ctrlIocDumpData": {"iocDumpNeedsRetrieved": False, "iocDumpTag": 0, "timeStamp": "0"},
+ "locateInProgress": False, "hasTrayIdentityIndicator": False, "controllerErrorMode": "notInErrorMode",
+ "codeVersions": [{"codeModule": "raid", "versionString": "08.42.30.05"}, {"codeModule": "hypervisor", "versionString": "08.42.30.05"},
+ {"codeModule": "management", "versionString": "11.42.0000.0026"}, {"codeModule": "iom", "versionString": "11.42.0G00.0001"},
+ {"codeModule": "bundle", "versionString": "08.42.30.05"}, {"codeModule": "bundleDisplay", "versionString": "11.40.3R2"}],
+ "id": "070000000000000000000001"}],
+ "drive": [{"offline": False, "hotSpare": False, "invalidDriveData": False, "available": True, "pfa": False,
+ "driveRef": "0100000050000396AC882ED10000000000000000", "status": "optimal", "cause": "None",
+ "interfaceType": {"driveType": "sas", "fibre": None,
+ "sas": {"deviceName": "50000396AC882ED1",
+ "drivePortAddresses": [{"channel": 2, "portIdentifier": "50000396AC882ED3"},
+ {"channel": 1, "portIdentifier": "50000396AC882ED2"}]},
+ "scsi": None},
+ "physicalLocation": {"trayRef": "0E00000000000000000000000000000000000000", "slot": 6,
+ "locationParent": {"refType": "genericTyped", "controllerRef": None, "symbolRef": None,
+ "typedReference": {"componentType": "tray",
+ "symbolRef": "0E00000000000000000000000000000000000000"}},
+ "locationPosition": 6, "label": "5"}, "manufacturer": "TOSHIBA ",
+ "manufacturerDate": "1447200000", "productID": "PX04SVQ160 ", "serialNumber": "Y530A001T5MD", "softwareVersion": "MSB6", "blkSize": 512,
+ "usableCapacity": "1599784443904", "rawCapacity": "1600321314816", "worldWideName": "50000396AC882ED10000000000000000",
+ "currentVolumeGroupRef": "0000000000000000000000000000000000000000", "sparedForDriveRef": "0000000000000000000000000000000000000000",
+ "mirrorDrive": "0000000000000000000000000000000000000000", "nonRedundantAccess": False, "workingChannel": -1, "volumeGroupIndex": -1,
+ "currentSpeed": "speed12gig", "maxSpeed": "speed12gig", "uncertified": False, "hasDegradedChannel": False, "degradedChannels": [],
+ "phyDriveType": "sas", "spindleSpeed": 0, "rtrAttributes": {"cruType": "dedicated", "parentCru": None,
+ "rtrAttributeData": {"hasReadyToRemoveIndicator": False,
+ "readyToRemove": False}}, "reserved": "",
+ "phyDriveTypeData": {"phyDriveType": "sas", "sataDriveAttributes": None}, "pfaReason": "None", "bypassSource": [],
+ "repairPolicy": {"removalData": {"removalMethod": "self", "rtrAttributes": {"hasReadyToRemoveIndicator": False, "readyToRemove": False}},
+ "replacementMethod": "self"}, "fdeCapable": True, "fdeEnabled": False, "fdeLocked": False,
+ "lockKeyID": "0000000000000000000000000000000000000000",
+ "ssdWearLife": {"averageEraseCountPercent": 18, "spareBlocksRemainingPercent": 91, "isWearLifeMonitoringSupported": True,
+ "percentEnduranceUsed": 18}, "driveMediaType": "ssd", "fpgaVersion": "",
+ "protectionInformationCapabilities": {"protectionInformationCapable": True, "protectionType": "type2Protection"},
+ "protectionInformationCapable": False, "protectionType": "type0Protection", "interposerPresent": False,
+ "interposerRef": "0000000000000000000000000000000000000000", "currentCommandAgingTimeout": 6, "defaultCommandAgingTimeout": 6,
+ "driveTemperature": {"currentTemp": 25, "refTemp": 64}, "blkSizePhysical": 4096, "lowestAlignedLBA": "0", "removed": False,
+ "locateInProgress": False, "fipsCapable": False, "firmwareVersion": "MSB6", "lockKeyIDValue": None,
+ "id": "0100000050000396AC882ED10000000000000000"},
+ {"offline": False, "hotSpare": False, "invalidDriveData": False, "available": True, "pfa": False,
+ "driveRef": "0100000050000396AC882EDD0000000000000000", "status": "optimal", "cause": "None",
+ "interfaceType": {"driveType": "sas", "fibre": None,
+ "sas": {"deviceName": "50000396AC882EDD",
+ "drivePortAddresses": [{"channel": 2, "portIdentifier": "50000396AC882EDF"},
+ {"channel": 1, "portIdentifier": "50000396AC882EDE"}]},
+ "scsi": None},
+ "physicalLocation": {"trayRef": "0E00000000000000000000000000000000000000", "slot": 8,
+ "locationParent": {"refType": "genericTyped", "controllerRef": None, "symbolRef": None,
+ "typedReference": {"componentType": "tray",
+ "symbolRef": "0E00000000000000000000000000000000000000"}},
+ "locationPosition": 8, "label": "7"}, "manufacturer": "TOSHIBA ",
+ "manufacturerDate": "1447200000", "productID": "PX04SVQ160 ", "serialNumber": "Y530A004T5MD", "softwareVersion": "MSB6", "blkSize": 512,
+ "usableCapacity": "1599784443904", "rawCapacity": "1600321314816", "worldWideName": "50000396AC882EDD0000000000000000",
+ "currentVolumeGroupRef": "0000000000000000000000000000000000000000", "sparedForDriveRef": "0000000000000000000000000000000000000000",
+ "mirrorDrive": "0000000000000000000000000000000000000000", "nonRedundantAccess": False, "workingChannel": -1, "volumeGroupIndex": -1,
+ "currentSpeed": "speed12gig", "maxSpeed": "speed12gig", "uncertified": False, "hasDegradedChannel": False, "degradedChannels": [],
+ "phyDriveType": "sas", "spindleSpeed": 0, "rtrAttributes": {"cruType": "dedicated", "parentCru": None,
+ "rtrAttributeData": {"hasReadyToRemoveIndicator": False,
+ "readyToRemove": False}}, "reserved": "",
+ "phyDriveTypeData": {"phyDriveType": "sas", "sataDriveAttributes": None}, "pfaReason": "None", "bypassSource": [],
+ "repairPolicy": {"removalData": {"removalMethod": "self", "rtrAttributes": {"hasReadyToRemoveIndicator": False, "readyToRemove": False}},
+ "replacementMethod": "self"}, "fdeCapable": True, "fdeEnabled": False, "fdeLocked": False,
+ "lockKeyID": "0000000000000000000000000000000000000000",
+ "ssdWearLife": {"averageEraseCountPercent": 18, "spareBlocksRemainingPercent": 91, "isWearLifeMonitoringSupported": True,
+ "percentEnduranceUsed": 18}, "driveMediaType": "ssd", "fpgaVersion": "",
+ "protectionInformationCapabilities": {"protectionInformationCapable": True, "protectionType": "type2Protection"},
+ "protectionInformationCapable": False, "protectionType": "type0Protection", "interposerPresent": False,
+ "interposerRef": "0000000000000000000000000000000000000000", "currentCommandAgingTimeout": 6, "defaultCommandAgingTimeout": 6,
+ "driveTemperature": {"currentTemp": 25, "refTemp": 64}, "blkSizePhysical": 4096, "lowestAlignedLBA": "0", "removed": False,
+ "locateInProgress": False, "fipsCapable": False, "firmwareVersion": "MSB6", "lockKeyIDValue": None,
+ "id": "0100000050000396AC882EDD0000000000000000"}],
+ "volumeGroup": [
+ {"sequenceNum": 1, "offline": False, "raidLevel": "raid6", "worldWideName": "600A098000A4B9D10000380A5D4AAC3C",
+ "volumeGroupRef": "04000000600A098000A4B9D10000380A5D4AAC3C", "reserved1": "000000000000000000000000", "reserved2": "",
+ "trayLossProtection": False, "label": "beegfs_storage_vg", "state": "complete", "spindleSpeedMatch": True, "spindleSpeed": 10500,
+ "isInaccessible": False, "securityType": "capable", "drawerLossProtection": False, "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True, "protectionType": "type2Protection"},
+ "volumeGroupData": {"type": "unknown", "diskPoolData": None},
+ "usage": "standard", "driveBlockFormat": "allNative", "reservedSpaceAllocated": False, "securityLevel": "fde", "usedSpace": "1099511627776",
+ "totalRaidedSpace": "9597654597632",
+ "extents": [{"sectorOffset": "268435456", "rawCapacity": "8498142969856", "raidLevel": "raid6",
+ "volumeGroupRef": "04000000600A098000A4B9D10000380A5D4AAC3C", "freeExtentRef": "03000000600A098000A4B9D10000380A5D4AAC3C",
+ "reserved1": "000000000000000000000000", "reserved2": ""}],
+ "largestFreeExtentSize": "8498142969856", "raidStatus": "optimal", "freeSpace": "8498142969856", "drivePhysicalType": "sas",
+ "driveMediaType": "hdd", "normalizedSpindleSpeed": "spindleSpeed10k", "diskPool": False,
+ "id": "04000000600A098000A4B9D10000380A5D4AAC3C", "name": "beegfs_storage_vg"}], "volume": [
+ {"offline": False, "extremeProtection": False, "volumeHandle": 0, "raidLevel": "raid6", "sectorOffset": "0",
+ "worldWideName": "600A098000A4B28D00003E435D4AAC54", "label": "beegfs_storage_01_1", "blkSize": 512, "capacity": "1099511627776",
+ "reconPriority": 1, "segmentSize": 131072, "action": "None",
+ "cache": {"cwob": False, "enterpriseCacheDump": False, "mirrorActive": True, "mirrorEnable": True, "readCacheActive": False,
+ "readCacheEnable": False, "writeCacheActive": True, "writeCacheEnable": True, "cacheFlushModifier": "flush10Sec",
+ "readAheadMultiplier": 1}, "mediaScan": {"enable": True, "parityValidationEnable": True},
+ "volumeRef": "02000000600A098000A4B28D00003E435D4AAC54", "status": "optimal", "volumeGroupRef": "04000000600A098000A4B9D10000380A5D4AAC3C",
+ "currentManager": "070000000000000000000001", "preferredManager": "070000000000000000000001",
+ "perms": {"mapToLUN": True, "snapShot": True, "format": True, "reconfigure": True, "mirrorPrimary": True, "mirrorSecondary": True,
+ "copySource": True, "copyTarget": True, "readable": True, "writable": True, "rollback": True, "mirrorSync": True, "newImage": True,
+ "allowDVE": True, "allowDSS": True, "concatVolumeMember": False, "flashReadCache": True, "asyncMirrorPrimary": True,
+ "asyncMirrorSecondary": True, "pitGroup": True, "cacheParametersChangeable": True, "allowThinManualExpansion": False,
+ "allowThinGrowthParametersChange": False},
+ "mgmtClientAttribute": 0, "dssPreallocEnabled": False, "dssMaxSegmentSize": 0, "preReadRedundancyCheckEnabled": False,
+ "protectionInformationCapable": False, "protectionType": "type0Protection", "applicationTagOwned": True,
+ "repairedBlockCount": 0, "extendedUniqueIdentifier": "", "cacheMirroringValidateProtectionInformation": False,
+ "expectedProtectionInformationAppTag": 0, "volumeUse": "standardVolume", "volumeFull": False, "volumeCopyTarget": False, "volumeCopySource": False,
+ "pitBaseVolume": False, "asyncMirrorTarget": False, "asyncMirrorSource": False, "remoteMirrorSource": False, "remoteMirrorTarget": False,
+ "diskPool": False, "flashCached": False, "increasingBy": "0", "metadata": [], "dataAssurance": False, "objectType": "volume",
+ "listOfMappings": [
+ {"lunMappingRef": "88000000A1010000000000000000000000000000", "lun": 1, "ssid": 0, "perms": 15,
+ "volumeRef": "02000000600A098000A4B28D00003E435D4AAC54", "type": "host", "mapRef": "84000000600A098000A4B28D00303D065D430118",
+ "id": "88000000A1010000000000000000000000000000"}],
+ "mapped": True, "currentControllerId": "070000000000000000000001",
+ "cacheSettings": {"cwob": False, "enterpriseCacheDump": False, "mirrorActive": True, "mirrorEnable": True, "readCacheActive": False,
+ "readCacheEnable": False, "writeCacheActive": True, "writeCacheEnable": True, "cacheFlushModifier": "flush10Sec",
+ "readAheadMultiplier": 1},
+ "thinProvisioned": False, "preferredControllerId": "070000000000000000000001", "totalSizeInBytes": "1099511627776", "onlineVolumeCopy": False,
+ "wwn": "600A098000A4B28D00003E435D4AAC54", "name": "beegfs_storage_01_1", "id": "02000000600A098000A4B28D00003E435D4AAC54"}],
+ "storagePoolBundle": {"cluster": [], "host": [
+ {"hostRef": "84000000600A098000A4B28D00303D005D430107", "clusterRef": "0000000000000000000000000000000000000000", "label": "test",
+ "isSAControlled": False, "confirmLUNMappingCreation": False, "hostTypeIndex": 28, "protectionInformationCapableAccessMethod": True,
+ "isLargeBlockFormatHost": False, "isLun0Restricted": False, "ports": [],
+ "initiators": [
+ {"initiatorRef": "89000000600A098000A4B9D1003037005D4300F5",
+ "nodeName": {"ioInterfaceType": "iscsi", "iscsiNodeName": "iqn.iscsi_tests1", "remoteNodeWWN": None, "nvmeNodeName": None},
+ "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "iscsi_test1",
+ "configuredAuthMethods": {"authMethodData": [{"authMethod": "None", "chapSecret": None}]},
+ "hostRef": "84000000600A098000A4B28D00303D005D430107", "initiatorInactive": False, "id": "89000000600A098000A4B9D1003037005D4300F5"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.iscsi_tests1", "label": "iscsi_test1"}],
+ "id": "84000000600A098000A4B28D00303D005D430107", "name": "test"},
+ {"hostRef": "84000000600A098000A4B9D1003037035D4300F8", "clusterRef": "0000000000000000000000000000000000000000", "label": "test2",
+ "isSAControlled": True, "confirmLUNMappingCreation": False, "hostTypeIndex": 28, "protectionInformationCapableAccessMethod": True,
+ "isLargeBlockFormatHost": False, "isLun0Restricted": False, "ports": [],
+ "initiators": [
+ {"initiatorRef": "89000000600A098000A4B9D1003037075D4300F9",
+ "nodeName": {"ioInterfaceType": "iscsi", "iscsiNodeName": "iqn.iscsi_tests2", "remoteNodeWWN": None, "nvmeNodeName": None},
+ "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "iscsi_test2",
+ "configuredAuthMethods": {"authMethodData": [{"authMethod": "None", "chapSecret": None}]},
+ "hostRef": "84000000600A098000A4B9D1003037035D4300F8", "initiatorInactive": False, "id": "89000000600A098000A4B9D1003037075D4300F9"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.iscsi_tests2", "label": "iscsi_test2"}],
+ "id": "84000000600A098000A4B9D1003037035D4300F8", "name": "test2"},
+ {"hostRef": "84000000600A098000A4B28D00303D065D430118", "clusterRef": "0000000000000000000000000000000000000000", "label": "beegfs_storage1",
+ "isSAControlled": False, "confirmLUNMappingCreation": False, "hostTypeIndex": 28, "protectionInformationCapableAccessMethod": True,
+ "isLargeBlockFormatHost": False, "isLun0Restricted": False, "ports": [],
+ "initiators": [
+ {"initiatorRef": "89000000600A098000A4B28D00303CF55D4300E3",
+ "nodeName": {"ioInterfaceType": "iscsi", "iscsiNodeName": "iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818", "remoteNodeWWN": None,
+ "nvmeNodeName": None}, "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_storage1_iscsi_0",
+ "configuredAuthMethods": {"authMethodData": [{"authMethod": "None", "chapSecret": None}]},
+ "hostRef": "84000000600A098000A4B28D00303D065D430118", "initiatorInactive": False, "id": "89000000600A098000A4B28D00303CF55D4300E3"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818", "label": "beegfs_storage1_iscsi_0"}],
+ "id": "84000000600A098000A4B28D00303D065D430118", "name": "beegfs_storage1"},
+ {"hostRef": "84000000600A098000A4B9D10030370B5D430109", "clusterRef": "0000000000000000000000000000000000000000", "label": "beegfs_metadata1",
+ "isSAControlled": False, "confirmLUNMappingCreation": False, "hostTypeIndex": 28, "protectionInformationCapableAccessMethod": True,
+ "isLargeBlockFormatHost": False, "isLun0Restricted": False, "ports": [],
+ "initiators": [
+ {"initiatorRef": "89000000600A098000A4B28D00303CFC5D4300F7",
+ "nodeName": {"ioInterfaceType": "iscsi", "iscsiNodeName": "iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8", "remoteNodeWWN": None,
+ "nvmeNodeName": None}, "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_metadata1_iscsi_0",
+ "configuredAuthMethods": {"authMethodData": [{"authMethod": "None", "chapSecret": None}]},
+ "hostRef": "84000000600A098000A4B9D10030370B5D430109", "initiatorInactive": False, "id": "89000000600A098000A4B28D00303CFC5D4300F7"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8", "label": "beegfs_metadata1_iscsi_0"}],
+ "id": "84000000600A098000A4B9D10030370B5D430109", "name": "beegfs_metadata1"}], "lunMapping": [
+ {"lunMappingRef": "8800000000000000000000000000000000000000", "lun": 7, "ssid": 16384, "perms": 15,
+ "volumeRef": "21000000600A098000A4B28D000027EC5CF10481", "type": "all", "mapRef": "0000000000000000000000000000000000000000",
+ "id": "8800000000000000000000000000000000000000"},
+ {"lunMappingRef": "880000008B010000000000000000000000000000", "lun": 7, "ssid": 16384, "perms": 15,
+ "volumeRef": "21000000600A098000A4B28D000027EC5CF10481", "type": "host", "mapRef": "84000000600A098000A4B28D00303D065D430118",
+ "id": "880000008B010000000000000000000000000000"},
+ {"lunMappingRef": "8800000090010000000000000000000000000000", "lun": 7, "ssid": 16384, "perms": 15,
+ "volumeRef": "21000000600A098000A4B28D000027EC5CF10481", "type": "host", "mapRef": "84000000600A098000A4B9D10030370B5D430109",
+ "id": "8800000090010000000000000000000000000000"},
+ {"lunMappingRef": "8800000092010000000000000000000000000000", "lun": 7, "ssid": 16384, "perms": 15,
+ "volumeRef": "21000000600A098000A4B28D000027EC5CF10481", "type": "host", "mapRef": "84000000600A098000A4B28D00303D005D430107",
+ "id": "8800000092010000000000000000000000000000"}, {"lunMappingRef": "88000000A1010000000000000000000000000000", "lun": 1, "ssid": 0, "perms": 15,
+ "volumeRef": "02000000600A098000A4B28D00003E435D4AAC54", "type": "host",
+ "mapRef": "84000000600A098000A4B28D00303D065D430118",
+ "id": "88000000A1010000000000000000000000000000"}]}, "highLevelVolBundle": {"pit": []}}
+
+ EXPECTED_GET_ARRAY_FACTS = {'facts_from_proxy': False,
+ 'netapp_controllers': [{'name': 'A', 'serial': '021619039162', 'status': 'optimal'}],
+ 'netapp_default_hostgroup_access_volume_lun': 7,
+ 'netapp_disks': [
+ {'available': True, 'firmware_version': 'MSB6', 'id': '0100000050000396AC882ED10000000000000000', 'media_type': 'ssd',
+ 'product_id': 'PX04SVQ160 ', 'serial_number': 'Y530A001T5MD', 'status': 'optimal',
+ 'tray_ref': '0E00000000000000000000000000000000000000', 'usable_bytes': '1599784443904'},
+ {'available': True, 'firmware_version': 'MSB6', 'id': '0100000050000396AC882EDD0000000000000000', 'media_type': 'ssd',
+ 'product_id': 'PX04SVQ160 ', 'serial_number': 'Y530A004T5MD', 'status': 'optimal',
+ 'tray_ref': '0E00000000000000000000000000000000000000', 'usable_bytes': '1599784443904'}],
+ 'netapp_driveside_interfaces': [{'controller': 'A', 'interface_speed': '12g', 'interface_type': 'sas'}],
+ 'netapp_enabled_features': ['autoCodeSync', 'autoLunTransfer', 'bundleMigration', 'driveSlotLimit', 'flashReadCache',
+ 'mixedDriveTypes', 'performanceTier', 'protectionInformation', 'raid6', 'secureVolume',
+ 'ssdSupport', 'stagedDownload', 'storagePoolsType2', 'subLunsAllowed',
+ 'totalNumberOfArvmMirrorsPerArray', 'totalNumberOfPitsPerArray',
+ 'totalNumberOfThinVolumesPerArray'],
+ 'netapp_host_groups': [{'hosts': ['test',
+ 'test2',
+ 'beegfs_storage1',
+ 'beegfs_metadata1'],
+ 'id': '0000000000000000000000000000000000000000',
+ 'name': 'default_hostgroup'}],
+ 'netapp_host_types': [{'index': 0, 'type': 'FactoryDefault'}, {'index': 1, 'type': 'W2KNETNCL'},
+ {'index': 27, 'type': 'LnxTPGSALUA_SF'}, {'index': 28, 'type': 'LnxDHALUA'}],
+ 'netapp_hosts': [
+ {'group_id': '0000000000000000000000000000000000000000', 'host_type_index': 28,
+ 'hosts_reference': '84000000600A098000A4B28D00303D005D430107',
+ 'id': '84000000600A098000A4B28D00303D005D430107', 'name': 'test',
+ 'ports': [{'address': 'iqn.iscsi_tests1', 'label': 'iscsi_test1', 'type': 'iscsi'}]},
+ {'group_id': '0000000000000000000000000000000000000000', 'host_type_index': 28,
+ 'hosts_reference': '84000000600A098000A4B9D1003037035D4300F8',
+ 'id': '84000000600A098000A4B9D1003037035D4300F8', 'name': 'test2',
+ 'ports': [{'address': 'iqn.iscsi_tests2', 'label': 'iscsi_test2', 'type': 'iscsi'}]},
+ {'group_id': '0000000000000000000000000000000000000000', 'host_type_index': 28,
+ 'hosts_reference': '84000000600A098000A4B28D00303D065D430118',
+ 'id': '84000000600A098000A4B28D00303D065D430118', 'name': 'beegfs_storage1',
+ 'ports': [{'address': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818', 'label': 'beegfs_storage1_iscsi_0',
+ 'type': 'iscsi'}]},
+ {'group_id': '0000000000000000000000000000000000000000', 'host_type_index': 28,
+ 'hosts_reference': '84000000600A098000A4B9D10030370B5D430109',
+ 'id': '84000000600A098000A4B9D10030370B5D430109', 'name': 'beegfs_metadata1',
+ 'ports': [{'address': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8', 'label': 'beegfs_metadata1_iscsi_0',
+ 'type': 'iscsi'}]}],
+ 'netapp_hostside_interfaces': [{'fc': [], 'ib': [],
+ 'iscsi': [
+ {'controller': 'A', 'current_interface_speed': '10g', 'ipv4_address': '10.10.11.110',
+ 'ipv4_enabled': True,
+ 'ipv4_gateway': '0.0.0.0', 'ipv4_subnet_mask': '255.255.255.0', 'ipv6_enabled': True,
+ 'iqn': 'iqn.1992-08.com.netapp:2806.600a098000a4b28d000000005cf10481', 'link_status': 'up',
+ 'mtu': 9000,
+ 'supported_interface_speeds': ['1g', '10g']}], 'sas': []}],
+ 'netapp_luns_by_target': {'beegfs_metadata1': [],
+ 'beegfs_storage1': [('beegfs_storage_01_1', 1)],
+ 'default_hostgroup': [('beegfs_storage_01_1', 1)],
+ 'test': [],
+ 'test2': []},
+ 'netapp_management_interfaces': [
+ {'alias': 'ictm0718s01c1-a', 'channel': 1, 'controller': 'A', 'dns_config_method': 'stat',
+ 'dns_servers': [{'addressType': 'ipv4', 'ipv4Address': '10.193.0.250', 'ipv6Address': None},
+ {'addressType': 'ipv4', 'ipv4Address': '10.192.0.250', 'ipv6Address': None}],
+ 'ipv4_address': '10.113.1.192',
+ 'ipv4_address_config_method': 'static', 'ipv4_enabled': True, 'ipv4_gateway': '10.113.1.1',
+ 'ipv4_subnet_mask': '255.255.255.0', 'ipv6_enabled': False, 'link_status': 'up',
+ 'mac_address': '00A098A4B28D', 'name': 'wan0', 'ntp_config_method': 'stat',
+ 'ntp_servers': [
+ {'addrType': 'ipvx', 'domainName': None,
+ 'ipvxAddress': {'addressType': 'ipv4', 'ipv4Address': '216.239.35.0', 'ipv6Address': None}},
+ {'addrType': 'ipvx', 'domainName': None,
+ 'ipvxAddress': {'addressType': 'ipv4', 'ipv4Address': '216.239.35.4', 'ipv6Address': None}}],
+ 'remote_ssh_access': True}],
+ 'netapp_storage_array': {'cache_block_sizes': [4096, 8192, 16384, 32768], 'chassis_serial': '021633035190',
+ 'firmware': '08.42.30.05', 'name': 'ictm0718s01c1',
+ 'segment_sizes': [32768, 65536, 131072, 262144, 524288, 495616, 655360, 1982464],
+ 'wwn': '600A098000A4B28D000000005CF10481'},
+ 'netapp_storage_pools': [
+ {'available_capacity': '8498142969856', 'id': '04000000600A098000A4B9D10000380A5D4AAC3C', 'name': 'beegfs_storage_vg',
+ 'total_capacity': '9597654597632', 'used_capacity': '1099511627776'}],
+ 'netapp_volumes': [
+ {'capacity': '1099511627776', 'id': '02000000600A098000A4B28D00003E435D4AAC54', 'is_thin_provisioned': False,
+ 'name': 'beegfs_storage_01_1', 'parent_storage_pool_id': '04000000600A098000A4B9D10000380A5D4AAC3C', 'workload': []}],
+ 'netapp_volumes_by_initiators': {'beegfs_metadata1': [],
+ 'beegfs_storage1': [{'id': '02000000600A098000A4B28D00003E435D4AAC54',
+ 'meta_data': {},
+ 'name': 'beegfs_storage_01_1',
+ 'raid_level': 'raid6',
+ 'segment_size_kb': 128,
+ 'stripe_count': -2,
+ 'workload_name': '',
+ 'wwn': '600A098000A4B28D00003E435D4AAC54'}],
+ 'test': [], 'test2': []},
+ 'netapp_workload_tags': [
+ {'attributes': [{'key': 'profileId', 'value': 'ansible_workload_1'}], 'id': '4200000001000000000000000000000000000000',
+ 'name': 'beegfs_metadata'},
+ {'attributes': [{'key': 'profileId', 'value': 'Other_1'}], 'id': '4200000002000000000000000000000000000000',
+ 'name': 'other_workload_1'}], 'snapshot_images': [], 'ssid': '1'}
+
+ def _set_args(self, **kwargs):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if kwargs:
+ module_args.update(kwargs)
+ set_module_args(module_args)
+
+ def test_get_controllers_pass(self):
+ """Verify get_controllers returns the expected results."""
+ self._set_args()
+ facts = Facts()
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, ["070000000000000000000002", "070000000000000000000001"])):
+ self.assertEqual(facts.get_controllers(), {"070000000000000000000001": "A", "070000000000000000000002": "B"})
+
+ def test_get_controllers_fail(self):
+ """Verify get_controllers throws the expected exceptions."""
+ self._set_args()
+ facts = Facts()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve controller list!"):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ facts.get_controllers()
+
+ def test_get_array_facts_pass(self):
+ """Verify get_array_facts method returns expected results."""
+ self.maxDiff = None
+ self._set_args()
+ facts = Facts()
+ facts.is_embedded = lambda: True
+ with mock.patch(self.GET_CONTROLLERS_FUNC, return_value={"070000000000000000000001": "A", "070000000000000000000002": "B"}):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GRAPH_RESPONSE), (200, self.WORKLOAD_RESPONSE)]):
+ self.assertEqual(facts.get_array_facts(), self.EXPECTED_GET_ARRAY_FACTS)
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_firmware.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_firmware.py
new file mode 100644
index 000000000..8c786d63b
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_firmware.py
@@ -0,0 +1,494 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_firmware import NetAppESeriesFirmware
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat.mock import patch, mock_open
+
+if six.PY2:
+ builtin_path = "__builtin__.open"
+else:
+ builtin_path = "builtins.open"
+
+
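+ # Wrap mock_open() so the mocked file handle is iterable under both Python 2 and 3;
+ # the tests below iterate over the mocked firmware/NVSRAM files line by line.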
+def mock_open_with_iter(*args, **kwargs):
+ mock = mock_open(*args, **kwargs)
+
+ if six.PY2:
+ mock.return_value.__iter__ = lambda x: iter(x.readline, "")
+ else:
+ mock.return_value.__iter__ = lambda x: x
+ mock.return_value.__next__ = lambda x: iter(x.readline, "")
+ return mock
+
+
+class FirmwareTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "http://localhost/devmgr/v2",
+ "ssid": "1",
+ "validate_certs": "no"}
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_firmware.NetAppESeriesFirmware.request"
+ BASE_REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_firmware.request"
+ CREATE_MULTIPART_FORMDATA_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_firmware.create_multipart_formdata"
+ SLEEP_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_firmware.sleep"
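+ # Leading bytes of a bundled firmware (.dlp) package header; used by the
+ # is_firmware_bundled() and firmware_version() tests below.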
+ BUNDLE_HEADER = b'combined_content\x00\x00\x00\x04\x00\x00\x07\xf8#Engenio Downloadable Package\n#Tue Jun 04 11:46:48 CDT 2019\ncheckList=compatibleBoard' \
+ b'Map,compatibleSubmodelMap,compatibleFirmwareMap,fileManifest\ncompatibleSubmodelMap=261|true,262|true,263|true,264|true,276|true,277|t' \
+ b'rue,278|true,282|true,300|true,301|true,302|true,318|true,319|true,320|true,321|true,322|true,323|true,324|true,325|true,326|true,328|t' \
+ b'rue,329|true,330|true,331|true,332|true,333|true,338|true,339|true,340|true,341|true,342|true,343|true,344|true,345|true,346|true,347|t' \
+ b'rue,356|true,357|true,390|true\nnonDisplayableAttributeList=512\ndisplayableAttributeList=FILENAME|RCB_11.40.5_280x_5ceef00e.dlp,VERSI' \
+ b'ON|11.40.5\ndacStoreLimit=512\nfileManifest=metadata.tar|metadata|08.42.50.00.000|c04275f98fc2f07bd63126fc57cb0569|bundle|10240,084250' \
+ b'00_m3_e30_842_root.img|linux|08.42.50.00|367c5216e5c4b15b904a025bff69f039|linux|1342177280,RC_08425000_m3_e30_842_280x.img|linux_cfw|0' \
+ b'8.42.50.00|e6589b0a50b29ff34b34d3ced8ae3ccb|eos|1073741824,msw.img|sam|11.42.0000.0028|ef3ee5589ab4a019a3e6f83768364aa1|linux|41943040' \
+ b'0,iom.img|iom|11.42.0G00.0003|9bb740f8d3a4e62a0f2da2ec83c254c4|linux|8177664\nmanagementVersionList=devmgr.v1142api8.Manager\ncompatib' \
+ b'leFirmwareMap=08.30.*.*|true,08.30.*.30|false,08.30.*.31|false,08.30.*.32|false,08.30.*.33|false,08.30.*.34|false,08.30.*.35|false,08.' \
+ b'30.*.36|false,08.30.*.37|false,08.30.*.38|false,08.30.*.39|false,08.40.*.*|true,08.40.*.30|false,08.40.*.31|false,08.40.*.32|false,08.4' \
+ b'0.*.33|false,08.40.*.34|false,08.40.*.35|false,08.40.*.36|false,08.40.*.37|false,08.40.*.38|false,08.40.*.39|false,08.41.*.*|true,08.4' \
+ b'1.*.30|false,08.41.*.31|false,08.41.*.32|false,08.41.*.33|false,08.41.*.34|false,08.41.*.35|false,08.41.*.36|false,08.41.*.37|false,08' \
+ b'.41.*.38|false,08.41.*.39|false,08.42.*.*|true,08.42.*.30|false,08.42.*.31|false,08.42.*.32|false,08.42.*.33|false,08.42.*.34|false,08' \
+ b'.42.*.35|false,08.42.*.36|false,08.42.*.37|false,08.42.*.38|false,08.42.*.39|false\nversion=08.42.50.00.000\ntype=tar\nversionTag=comb' \
+ b'ined_content\n'
+
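+ # Leading bytes of an NVSRAM (.dlp) file header; used by the nvsram_version() test below.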
+ NVSRAM_HEADER = b'nvsram \x00\x00\x00\x01\x00\x00\x00\xa0\x00\x00\x00\x04280X\x00\x00\x00\x00\x00\x00\x00\x032801 2804 2806 \x00\x00' \
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x1bArapaho controller, 8.52 FW\x00\x00\x001dual controller configuration, with cac' \
+ b'he battery\x07\x81A\x08Config\x00\x00\x0008.52.00.00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\xdc\xaf\x00\x00' \
+ b'\x94\xc1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00 2801 2804 2806 \x00\x00\x00\x00\x00' \
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
+ b'\x00\x00\x00\x00\x00\x00Board\n .Board Name = "NetApp RAID Controller"\n .NVSRAM Configuration Number' \
+ b' = "N280X-852834-D02"\n\nUserCfg\n .Enable Synchronous Negotiation = 0x00 \n'
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_is_firmware_bundled_pass(self):
+ """Determine whether firmware file is bundled."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ with patch(builtin_path, mock_open(read_data=b"firmwarexxxxxxxx")) as mock_file:
+ firmware = NetAppESeriesFirmware()
+ self.assertEqual(firmware.is_firmware_bundled(), False)
+
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ with patch(builtin_path, mock_open(read_data=self.BUNDLE_HEADER[:16])) as mock_file:
+ firmware = NetAppESeriesFirmware()
+ self.assertEqual(firmware.is_firmware_bundled(), True)
+
+ def test_is_firmware_bundled_fail(self):
+ """Verify non-firmware fails."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ with patch(builtin_path, mock_open(read_data=b"xxxxxxxxxxxxxxxx")) as mock_file:
+ firmware = NetAppESeriesFirmware()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Firmware file is invalid."):
+ firmware.is_firmware_bundled()
+
+ def test_firmware_version(self):
+ """Verify correct firmware version is returned."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.is_firmware_bundled = lambda: True
+ with patch(builtin_path, mock_open_with_iter(read_data=self.BUNDLE_HEADER)) as mock_file:
+ self.assertEqual(firmware.firmware_version(), b"11.40.5")
+
+ def test_nvsram_version(self):
+ """Verify correct nvsram version is returned."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with patch(builtin_path, mock_open_with_iter(read_data=self.NVSRAM_HEADER)) as mock_file:
+ self.assertEqual(firmware.nvsram_version(), b"N280X-852834-D02")
+
+ def test_check_system_health_pass(self):
+ """Validate check_system_health method."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with patch(self.REQUEST_FUNC, return_value=(200, {"successful": True})):
+ firmware.check_system_health()
+
+ def test_check_system_health_fail(self):
+ """Validate check_system_health method throws proper exceptions."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Health check failed!"):
+ with patch(self.REQUEST_FUNC, return_value=(404, Exception())):
+ firmware.check_system_health()
+
+ def test_embedded_check_nvsram_compatibility_pass(self):
+ """Verify embedded nvsram compatibility."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=(200, {"signatureTestingPassed": True,
+ "fileCompatible": True,
+ "versionContents": [{"module": "nvsram",
+ "bundledVersion": "N280X-842834-D02",
+ "onboardVersion": "N280X-842834-D02"}]})):
+ firmware.embedded_check_nvsram_compatibility()
+
+ def test_embedded_check_nvsram_compatibility_fail(self):
+ """Verify embedded nvsram compatibility fails with expected exceptions."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve NVSRAM compatibility results."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.embedded_check_nvsram_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid NVSRAM file."):
+ with patch(self.REQUEST_FUNC, return_value=(200, {"signatureTestingPassed": False,
+ "fileCompatible": False,
+ "versionContents": [{"module": "nvsram",
+ "bundledVersion": "N280X-842834-D02",
+ "onboardVersion": "N280X-842834-D02"}]})):
+ firmware.embedded_check_nvsram_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Incompatible NVSRAM file."):
+ with patch(self.REQUEST_FUNC, return_value=(200, {"signatureTestingPassed": True,
+ "fileCompatible": False,
+ "versionContents": [{"module": "nvsram",
+ "bundledVersion": "N280X-842834-D02",
+ "onboardVersion": "N280X-842834-D02"}]})):
+ firmware.embedded_check_nvsram_compatibility()
+
+ def test_embedded_check_firmware_compatibility_pass(self):
+ """Verify embedded firmware compatibility."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=(200, {
+ "signatureTestingPassed": True,
+ "fileCompatible": True,
+ "versionContents": [
+ {"module": "bundle", "bundledVersion": "08.42.50.00.000", "onboardVersion": "08.42.30.05"},
+ {"module": "bundleDisplay", "bundledVersion": "11.40.5", "onboardVersion": "11.40.3R2"},
+ {"module": "hypervisor", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "raid", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "management", "bundledVersion": "11.42.0000.0028", "onboardVersion": "11.42.0000.0026"},
+ {"module": "iom", "bundledVersion": "11.42.0G00.0003", "onboardVersion": "11.42.0G00.0001"}]})):
+ firmware.embedded_check_bundle_compatibility()
+
+ def test_embedded_check_firmware_compatibility_fail(self):
+ """Verify embedded firmware compatibility fails with expected exceptions."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve bundle compatibility results."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.embedded_check_bundle_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid firmware bundle file."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=(200, {
+ "signatureTestingPassed": False,
+ "fileCompatible": True,
+ "versionContents": [
+ {"module": "bundle", "bundledVersion": "08.42.50.00.000", "onboardVersion": "08.42.30.05"},
+ {"module": "bundleDisplay", "bundledVersion": "11.40.5", "onboardVersion": "11.40.3R2"},
+ {"module": "hypervisor", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "raid", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "management", "bundledVersion": "11.42.0000.0028", "onboardVersion": "11.42.0000.0026"},
+ {"module": "iom", "bundledVersion": "11.42.0G00.0003", "onboardVersion": "11.42.0G00.0001"}]})):
+ firmware.embedded_check_bundle_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Incompatible firmware bundle file."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=(200, {
+ "signatureTestingPassed": True,
+ "fileCompatible": False,
+ "versionContents": [
+ {"module": "bundle", "bundledVersion": "08.42.50.00.000", "onboardVersion": "08.42.30.05"},
+ {"module": "bundleDisplay", "bundledVersion": "11.40.5", "onboardVersion": "11.40.3R2"},
+ {"module": "hypervisor", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "raid", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "management", "bundledVersion": "11.42.0000.0028", "onboardVersion": "11.42.0000.0026"},
+ {"module": "iom", "bundledVersion": "11.42.0G00.0003", "onboardVersion": "11.42.0G00.0001"}]})):
+ firmware.embedded_check_bundle_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Downgrades are not permitted."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=(200, {
+ "signatureTestingPassed": True,
+ "fileCompatible": True,
+ "versionContents": [
+ {"module": "bundle", "bundledVersion": "08.42.00.00.000", "onboardVersion": "08.50.30.05"},
+ {"module": "bundleDisplay", "bundledVersion": "11.40.5", "onboardVersion": "11.40.3R2"},
+ {"module": "hypervisor", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "raid", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "management", "bundledVersion": "11.42.0000.0028", "onboardVersion": "11.42.0000.0026"},
+ {"module": "iom", "bundledVersion": "11.42.0G00.0003", "onboardVersion": "11.42.0G00.0001"}]})):
+ firmware.embedded_check_bundle_compatibility()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Downgrades are not permitted."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("", {})):
+ with patch(self.REQUEST_FUNC, return_value=(200, {
+ "signatureTestingPassed": True,
+ "fileCompatible": True,
+ "versionContents": [
+ {"module": "bundle", "bundledVersion": "08.42.00.00.000", "onboardVersion": "09.20.30.05"},
+ {"module": "bundleDisplay", "bundledVersion": "11.40.5", "onboardVersion": "11.40.3R2"},
+ {"module": "hypervisor", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "raid", "bundledVersion": "08.42.50.00", "onboardVersion": "08.42.30.05"},
+ {"module": "management", "bundledVersion": "11.42.0000.0028", "onboardVersion": "11.42.0000.0026"},
+ {"module": "iom", "bundledVersion": "11.42.0G00.0003", "onboardVersion": "11.42.0G00.0001"}]})):
+ firmware.embedded_check_bundle_compatibility()
+
+ def test_wait_for_web_services_pass(self):
+ """Verify controller reboot wait succeeds."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.firmware_version = lambda: b"08.42.30.05"
+ firmware.nvsram_version = lambda: b"N280X-842834-D02"
+ firmware.is_firmware_bundled = lambda: False
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.30.05"]), (200, ["N280X-842834-D02"]), (200, {"status": "optimal"})]):
+ firmware.wait_for_web_services()
+
+ def test_wait_for_web_services_fail(self):
+ """Verify controller reboot wait throws expected exceptions"""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Timeout waiting for Santricity Web Services."):
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with patch(self.BASE_REQUEST_FUNC, return_value=Exception()):
+ firmware.wait_for_web_services()
+
+ def test_check_nvsram_compatibility_pass(self):
+ """Verify proxy nvsram compatibility."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": 1}),
+ (200, {"checkRunning": True}),
+ (200, {"checkRunning": False,
+ "results": [{"nvsramFiles": [{"filename": "test_nvsram.dlp"}]}]})]):
+ firmware.proxy_check_nvsram_compatibility()
+
+ def test_check_nvsram_compatibility_fail(self):
+ """Verify proxy nvsram compatibility throws expected exceptions."""
+ self._set_args({"firmware": "test.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to receive NVSRAM compatibility information."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.proxy_check_nvsram_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve NVSRAM status update from proxy."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": 1}), Exception()]):
+ firmware.proxy_check_nvsram_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "NVSRAM is not compatible."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": 1}),
+ (200, {"checkRunning": True}),
+ (200, {"checkRunning": False,
+ "results": [{"nvsramFiles": [{"filename": "not_test_nvsram.dlp"}]}]})]):
+ firmware.proxy_check_nvsram_compatibility()
+
+ def test_proxy_check_firmware_compatibility_pass(self):
+ """Verify proxy firmware compatibility."""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": 1}),
+ (200, {"checkRunning": True}),
+ (200, {"checkRunning": False,
+ "results": [{"cfwFiles": [{"filename": "test_firmware.dlp"}]}]})]):
+ firmware.proxy_check_firmware_compatibility()
+
+ def test_proxy_check_firmware_compatibility_fail(self):
+ """Verify proxy firmware compatibility throws expected exceptions."""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with patch(self.SLEEP_FUNC, return_value=None):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to receive firmware compatibility information."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.proxy_check_firmware_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve firmware status update from proxy."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": 1}), Exception()]):
+ firmware.proxy_check_firmware_compatibility(retries=0)
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Firmware bundle is not compatible."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": 1}),
+ (200, {"checkRunning": True}),
+ (200, {"checkRunning": False, "results": [{"cfwFiles": [{"filename": "not_test_firmware.dlp"}]}]})]):
+ firmware.proxy_check_firmware_compatibility(retries=0)
+
+ def test_proxy_upload_and_check_compatibility_pass(self):
+ """Verify proxy_upload_and_check_compatibility"""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.proxy_check_nvsram_compatibility = lambda: None
+ firmware.proxy_check_firmware_compatibility = lambda: None
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("headers", "data")):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"version": "XX.XX.XX.XX", "filename": "test"},
+ {"version": "XXXXXXXXXX", "filename": "test.dlp"}]),
+ (200, None), (200, None)]):
+ firmware.proxy_upload_and_check_compatibility()
+
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"version": "XX.XX.XX.XX", "filename": "test"},
+ {"version": "test_nvsram", "filename": "test_nvsram.dlp"},
+ {"version": "test", "filename": "test.dlp"},
+ {"filename": "test_firmware.dlp", "version": "test_firmware"}]),
+ (200, None), (200, None)]):
+ firmware.proxy_upload_and_check_compatibility()
+
+ def test_proxy_upload_and_check_compatibility_fail(self):
+ """Verify proxy_upload_and_check_compatibility throws expected exceptions."""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.proxy_check_nvsram_compatibility = lambda: None
+ firmware.proxy_check_firmware_compatibility = lambda: None
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve existing firmware files."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("headers", "data")):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.proxy_upload_and_check_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to upload NVSRAM file."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("headers", "data")):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"version": "XX.XX.XX.XX", "filename": "test"},
+ {"version": "XXXXXXXXXX", "filename": "test.dlp"},
+ {"filename": "test_firmware.dlp", "version": "test_firmware"}]),
+ Exception()]):
+ firmware.proxy_upload_and_check_compatibility()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to upload firmware bundle file."):
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=("headers", "data")):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"version": "XX.XX.XX.XX", "filename": "test"},
+ {"version": "test_nvsram", "filename": "test_nvsram.dlp"},
+ {"version": "XXXXXXXXXX", "filename": "test.dlp"}]),
+ Exception()]):
+ firmware.proxy_upload_and_check_compatibility()
+
+ def test_proxy_check_upgrade_required_pass(self):
+ """Verify proxy_check_upgrade_required."""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.firmware_version = lambda: b"08.42.50.00"
+ firmware.nvsram_version = lambda: b"nvsram_version"
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"versionString": "08.42.50.00"}]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: True
+ firmware.proxy_check_upgrade_required()
+ self.assertFalse(firmware.upgrade_required)
+
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.50.00"]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: False
+ firmware.proxy_check_upgrade_required()
+ self.assertFalse(firmware.upgrade_required)
+
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.firmware_version = lambda: b"08.42.50.00"
+ firmware.nvsram_version = lambda: b"not_nvsram_version"
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"versionString": "08.42.50.00"}]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: True
+ firmware.proxy_check_upgrade_required()
+ self.assertTrue(firmware.upgrade_required)
+
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.50.00"]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: False
+ firmware.proxy_check_upgrade_required()
+ self.assertTrue(firmware.upgrade_required)
+
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.firmware_version = lambda: b"08.52.00.00"
+ firmware.nvsram_version = lambda: b"nvsram_version"
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"versionString": "08.42.50.00"}]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: True
+ firmware.proxy_check_upgrade_required()
+ self.assertTrue(firmware.upgrade_required)
+
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.50.00"]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: False
+ firmware.proxy_check_upgrade_required()
+ self.assertTrue(firmware.upgrade_required)
+
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+ firmware.firmware_version = lambda: b"08.52.00.00"
+ firmware.nvsram_version = lambda: b"not_nvsram_version"
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"versionString": "08.42.50.00"}]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: True
+ firmware.proxy_check_upgrade_required()
+ self.assertTrue(firmware.upgrade_required)
+
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.50.00"]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: False
+ firmware.proxy_check_upgrade_required()
+ self.assertTrue(firmware.upgrade_required)
+
+ def test_proxy_check_upgrade_required_fail(self):
+ """Verify proxy_check_upgrade_required throws expected exceptions."""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ firmware.firmware_version = lambda: b"08.42.50.00"
+ firmware.nvsram_version = lambda: b"not_nvsram_version"
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve controller firmware information."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.proxy_check_upgrade_required()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage system's NVSRAM version."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"versionString": "08.42.50.00"}]), Exception()]):
+ firmware.is_firmware_bundled = lambda: True
+ firmware.proxy_check_upgrade_required()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage system's NVSRAM version."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.50.00"]), Exception()]):
+ firmware.is_firmware_bundled = lambda: False
+ firmware.proxy_check_upgrade_required()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Downgrades are not permitted."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, [{"versionString": "08.42.50.00"}]), (200, ["nvsram_version"])]):
+ firmware.firmware_version = lambda: b"08.40.00.00"
+ firmware.nvsram_version = lambda: b"nvsram_version"
+ firmware.is_firmware_bundled = lambda: True
+ firmware.proxy_check_upgrade_required()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Downgrades are not permitted."):
+ with patch(self.REQUEST_FUNC, side_effect=[(200, ["08.42.50.00"]), (200, ["nvsram_version"])]):
+ firmware.is_firmware_bundled = lambda: False
+ firmware.proxy_check_upgrade_required()
+
+ def test_proxy_wait_for_upgrade_pass(self):
+ """Verify proxy_wait_for_upgrade."""
+ with patch(self.SLEEP_FUNC, return_value=None):
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "expected_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with patch(self.REQUEST_FUNC, side_effect=[(200, {"running": True}),
+ (200, {"running": False, "activationCompletionTime": "completion_time"})]):
+ firmware.proxy_wait_for_upgrade()
+
+ def test_proxy_wait_for_upgrade_fail(self):
+ """Verify proxy_wait_for_upgrade throws expected exceptions."""
+ with patch(self.SLEEP_FUNC, return_value=None):
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to complete upgrade."):
+ with patch(self.REQUEST_FUNC, return_value=(200, {"running": False, "activationCompletionTime": None})):
+ firmware.proxy_wait_for_upgrade()
+
+ def test_proxy_upgrade_fail(self):
+ """Verify proxy_upgrade throws expected exceptions."""
+ self._set_args({"firmware": "test_firmware.dlp", "nvsram": "test_nvsram.dlp"})
+ firmware = NetAppESeriesFirmware()
+
+ firmware.is_firmware_bundled = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to initiate firmware upgrade."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.proxy_upgrade()
+
+ firmware.is_firmware_bundled = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to initiate firmware upgrade."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.proxy_upgrade()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_global.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_global.py
new file mode 100644
index 000000000..44ba8f4ab
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_global.py
@@ -0,0 +1,494 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_global import NetAppESeriesGlobalSettings
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat.mock import patch, mock_open
+
+
+class GlobalSettingsTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ 'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1',
+ }
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_global.NetAppESeriesGlobalSettings.request'
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_init_pass(self):
+ """Verify module instantiates successfully."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+
+ def test_init_fail(self):
+ """Verify module fails when autoload is enabled but host connectivity reporting is not."""
+ self._set_args({"automatic_load_balancing": "enabled", "host_connectivity_reporting": "disabled"})
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Option automatic_load_balancing requires host_connectivity_reporting to be enabled."):
+ instance = NetAppESeriesGlobalSettings()
+
+ def test_get_current_configuration_pass(self):
+ """Ensure get_current_configuration method succeeds."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ with patch(self.REQ_FUNC, side_effect=[(200, {"productCapabilities": [], "featureParameters": {"cacheBlockSizes": []}}), (200, []),
+ (200, [{"defaultHostTypeIndex": 28, "cache": {"cacheBlkSize": 32768, "demandFlushThreshold": 90}}]),
+ (200, {"autoLoadBalancingEnabled": True, "hostConnectivityReportingEnabled": True, "name": "array1"})]):
+ self.assertEqual(instance.get_current_configuration(), {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 90},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'})
+
+ def test_get_current_configuration_fail(self):
+ """Ensure exceptions are thrown when current configuration requests fail."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve storage array capabilities."):
+ with patch(self.REQ_FUNC, side_effect=[Exception()]):
+ instance.get_current_configuration()
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve storage array host options."):
+ with patch(self.REQ_FUNC, side_effect=[(200, {"productCapabilities": [], "featureParameters": {"cacheBlockSizes": []}}), Exception()]):
+ instance.get_current_configuration()
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve cache settings."):
+ with patch(self.REQ_FUNC, side_effect=[(200, {"productCapabilities": [], "featureParameters": {"cacheBlockSizes": []}}), (200, []), Exception()]):
+ instance.get_current_configuration()
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to determine current configuration."):
+ with patch(self.REQ_FUNC, side_effect=[(200, {"productCapabilities": [], "featureParameters": {"cacheBlockSizes": []}}), (200, []),
+ (200, [{"defaultHostTypeIndex": 28, "cache": {"cacheBlkSize": 32768, "demandFlushThreshold": 90}}]),
+ Exception()]):
+ instance.get_current_configuration()
+
+ def test_cache_block_size_pass(self):
+ """Verify cache_block_size passes successfully."""
+ self._set_args({"cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 90},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ self.assertFalse(instance.change_cache_block_size_required())
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 90},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ self.assertFalse(instance.change_cache_block_size_required())
+
+ self._set_args({"cache_block_size": 16384, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 90},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ self.assertTrue(instance.change_cache_block_size_required())
+
+ def test_cache_block_size_fail(self):
+ """Verify cache_block_size throws expected exceptions."""
+ self._set_args({"cache_block_size": 16384, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 90},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Invalid cache block size."):
+ self.assertTrue(instance.change_cache_block_size_required())
+
+ def test_change_cache_flush_threshold_required_pass(self):
+ """Verify change_cache_block_size_required passes successfully."""
+ self._set_args({"cache_block_size": 32768, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ self.assertFalse(instance.change_cache_flush_threshold_required())
+
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 80, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ self.assertFalse(instance.change_cache_flush_threshold_required())
+
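+        # The requested threshold differs from the current setting, so a change should be required.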
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+ self.assertTrue(instance.change_cache_flush_threshold_required())
+
+ def test_change_cache_flush_threshold_required_fail(self):
+        """Verify change_cache_flush_threshold_required throws expected exceptions."""
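+        # A threshold of 100 is outside the accepted range, so the module should fail.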
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 100, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Invalid cache flushing threshold, it must be equal to or between 0 and 100."):
+ instance.change_cache_flush_threshold_required()
+
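+        # A threshold of 0 is outside the accepted range, so the module should fail.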
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 0, "default_host_type": "linux dm-mp", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {}, "name": 'array1'}
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Invalid cache flushing threshold, it must be equal to or between 0 and 100."):
+ instance.change_cache_flush_threshold_required()
+
+ def test_change_host_type_required_pass(self):
+ """Verify change_host_type_required passes successfully."""
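+        # default_host_type is not specified, so the module should report that no change is required.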
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_host_type_required())
+
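+        # The requested host type matches the current default host type index, so no change should be required.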
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Linux DM-MP", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_host_type_required())
+
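+        # The requested host type differs from the current default host type index, so a change should be required.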
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertTrue(instance.change_host_type_required())
+
+ def test_change_host_type_required_fail(self):
+ """Verify change_host_type_required throws expected exceptions"""
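+        # An unrecognized host type name should cause the module to fail.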
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "NotAHostType", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Invalid host type index!"):
+ self.assertTrue(instance.change_host_type_required())
+
+ def test_change_autoload_enabled_required_pass(self):
+ """Verify change_autoload_enabled_required passes successfully."""
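+        # automatic_load_balancing is not specified, so the module should report that no change is required.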
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_autoload_enabled_required())
+
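+        # Automatic load balancing is already enabled, so no change should be required.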
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_autoload_enabled_required())
+
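+        # Disabling automatic load balancing while it is currently enabled requires a change.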
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertTrue(instance.change_autoload_enabled_required())
+
+ def test_change_autoload_enabled_required_fail(self):
+ """Verify change_autoload_enabled_required throws expected exceptions"""
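+        # The array does not report autoload capability, so requesting it should cause the module to fail.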
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "NotAHostType", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Automatic load balancing is not available."):
+ self.assertTrue(instance.change_autoload_enabled_required())
+
+ def test_change_host_connectivity_reporting_enabled_required_pass(self):
+ """Verify change_host_connectivity_reporting_enabled_required passes successfully."""
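+        # host_connectivity_reporting is not specified, so the module should report that no change is required.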
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_host_connectivity_reporting_enabled_required())
+
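+        # Host connectivity reporting is already enabled, so no change should be required.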
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "enabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_host_connectivity_reporting_enabled_required())
+
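+        # Disabling host connectivity reporting while it is currently enabled requires a change.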
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertTrue(instance.change_host_connectivity_reporting_enabled_required())
+
+ def test_change_name_required_pass(self):
+ """Verify change_name_required passes successfully."""
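+        # No array name is specified, so the module should report that no change is required.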
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_name_required())
+
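+        # The requested name matches the current array name, so no change should be required.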
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array1"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertFalse(instance.change_name_required())
+
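+        # The requested name differs from the current array name, so a change should be required.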
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ self.assertTrue(instance.change_name_required())
+
+ def test_change_name_required_fail(self):
+ """Verify change_name_required throws expected exceptions"""
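+        # A 31-character name exceeds the 30-character limit, so the module should fail.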
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "NotAHostType", "automatic_load_balancing": "enabled",
+ "host_connectivity_reporting": "enabled", "name": "A" * 31})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": False, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"The provided name is invalid, it must be less than or equal to 30 characters in length."):
+ self.assertTrue(instance.change_name_required())
+
+ def test_update_cache_settings_pass(self):
+ """Verify update_cache_settings passes successfully."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with patch(self.REQ_FUNC, return_value=(200, None)):
+ instance.update_cache_settings()
+
+ def test_update_cache_settings_fail(self):
+ """Verify update_cache_settings throws expected exceptions"""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to set cache settings."):
+ with patch(self.REQ_FUNC, return_value=Exception()):
+ instance.update_cache_settings()
+
+ def test_update_host_type_pass(self):
+ """Verify update_host_type passes successfully."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with patch(self.REQ_FUNC, return_value=(200, None)):
+ instance.update_host_type()
+
+ def test_update_host_type_fail(self):
+ """Verify update_host_type throws expected exceptions"""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to set default host type."):
+ with patch(self.REQ_FUNC, return_value=Exception()):
+ instance.update_host_type()
+
+ def test_update_autoload_pass(self):
+ """Verify update_autoload passes successfully."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with patch(self.REQ_FUNC, return_value=(200, None)):
+ instance.update_autoload()
+
+ def test_update_autoload_fail(self):
+ """Verify update_autoload throws expected exceptions"""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to set automatic load balancing state."):
+ with patch(self.REQ_FUNC, return_value=Exception()):
+ instance.update_autoload()
+
+ def test_update_host_connectivity_reporting_enabled_pass(self):
+ """Verify update_host_connectivity_reporting_enabled passes successfully."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with patch(self.REQ_FUNC, return_value=(200, None)):
+ instance.update_host_connectivity_reporting_enabled()
+
+ def test_update_host_connectivity_reporting_enabled_fail(self):
+ """Verify update_host_connectivity_reporting_enabled throws expected exceptions"""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to enable host connectivity reporting."):
+ with patch(self.REQ_FUNC, return_value=Exception()):
+ instance.update_host_connectivity_reporting_enabled()
+
+ def test_update_name_pass(self):
+ """Verify update_name passes successfully."""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with patch(self.REQ_FUNC, return_value=(200, None)):
+ instance.update_name()
+
+ def test_update_name_fail(self):
+ """Verify update_name throws expected exceptions"""
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.get_current_configuration = lambda: {"autoload_capable": True, "autoload_enabled": True, "cache_block_size_options": [16384, 32768],
+ "cache_settings": {"cache_block_size": 32768, "cache_flush_threshold": 80},
+ "default_host_type_index": 28, "host_connectivity_reporting_enabled": True,
+ "host_type_options": {"windows": 1, "linux": 28}, "name": 'array1'}
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to set the storage array name!"):
+ with patch(self.REQ_FUNC, return_value=Exception()):
+ instance.update_name()
+
+ def test_update_pass(self):
+ """Verify update passes successfully."""
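+        # Every change check reports no difference, so the module should exit with changed=False.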
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+
+ instance.change_autoload_enabled_required = lambda: False
+ instance.change_cache_block_size_required = lambda: False
+ instance.change_cache_flush_threshold_required = lambda: False
+ instance.change_host_type_required = lambda: False
+ instance.change_name_required = lambda: False
+ instance.change_host_connectivity_reporting_enabled_required = lambda: False
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': False"):
+ with patch(self.REQ_FUNC, side_effect=[(200, {"productCapabilities": [], "featureParameters": {"cacheBlockSizes": []}}), (200, []),
+ (200, [{"defaultHostTypeIndex": 28, "cache": {"cacheBlkSize": 32768, "demandFlushThreshold": 90}}]),
+ (200, {"autoLoadBalancingEnabled": True, "hostConnectivityReportingEnabled": True, "name": "array1"})] * 2):
+ instance.update()
+
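+        # A single change check (autoload) reports a difference, so the module should exit with changed=True.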
+ self._set_args({"cache_block_size": 32768, "cache_flush_threshold": 90, "default_host_type": "Windows", "automatic_load_balancing": "disabled",
+ "host_connectivity_reporting": "disabled", "name": "array2"})
+ instance = NetAppESeriesGlobalSettings()
+ instance.change_autoload_enabled_required = lambda: True
+ instance.change_cache_block_size_required = lambda: False
+ instance.change_cache_flush_threshold_required = lambda: False
+ instance.change_host_type_required = lambda: False
+ instance.change_name_required = lambda: False
+ instance.change_host_connectivity_reporting_enabled_required = lambda: False
+ instance.update_autoload = lambda: None
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ with patch(self.REQ_FUNC, side_effect=[(200, {"productCapabilities": [], "featureParameters": {"cacheBlockSizes": []}}), (200, []),
+ (200, [{"defaultHostTypeIndex": 28, "cache": {"cacheBlkSize": 32768, "demandFlushThreshold": 90}}]),
+ (200, {"autoLoadBalancingEnabled": True, "hostConnectivityReportingEnabled": True, "name": "array1"})] * 2):
+ instance.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_host.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_host.py
new file mode 100644
index 000000000..646010ffc
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_host.py
@@ -0,0 +1,434 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_host import NetAppESeriesHost
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class HostTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ 'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1',
+ 'name': '1',
+ }
+ HOST = {
+ 'name': '1',
+ 'hostRef': '123',
+ 'label': '1',
+ 'id': '0' * 30,
+ 'clusterRef': 40 * '0',
+ 'hostTypeIndex': 28,
+ 'hostSidePorts': [],
+ 'initiators': [],
+ 'ports': [],
+ }
+ HOST_ALT = {
+ 'name': '2',
+ 'label': '2',
+ 'id': '1' * 30,
+ 'clusterRef': '1',
+ 'hostSidePorts': [],
+ 'initiators': [],
+ 'ports': [],
+ }
+ EXISTING_HOSTS = [
+ {"hostRef": "84000000600A098000A4B28D00303D065D430118", "clusterRef": "0000000000000000000000000000000000000000", "label": "Beegfs_storage1",
+ "hostTypeIndex": 28, "ports": [], "initiators": [{"initiatorRef": "89000000600A098000A4B28D00303CF55D4300E3",
+ "nodeName": {"ioInterfaceType": "iscsi",
+ "iscsiNodeName": "iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818",
+ "remoteNodeWWN": None, "nvmeNodeName": None},
+ "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_storage1_iscsi_0",
+ "hostRef": "84000000600A098000A4B28D00303D065D430118",
+ "id": "89000000600A098000A4B28D00303CF55D4300E3"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818", "label": "beegfs_storage1_iscsi_0"}],
+ "id": "84000000600A098000A4B28D00303D065D430118", "name": "beegfs_storage1"},
+ {"hostRef": "84000000600A098000A4B9D10030370B5D430109", "clusterRef": "0000000000000000000000000000000000000000", "label": "beegfs_metadata1",
+ "hostTypeIndex": 28, "ports": [], "initiators": [{"initiatorRef": "89000000600A098000A4B28D00303CFC5D4300F7",
+ "nodeName": {"ioInterfaceType": "iscsi",
+ "iscsiNodeName": "iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8",
+ "remoteNodeWWN": None, "nvmeNodeName": None},
+ "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_metadata1_iscsi_0",
+ "hostRef": "84000000600A098000A4B9D10030370B5D430109",
+ "id": "89000000600A098000A4B28D00303CFC5D4300F7"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8", "label": "beegfs_metadata1_iscsi_0"}],
+ "id": "84000000600A098000A4B9D10030370B5D430109", "name": "beegfs_metadata1"},
+ {"hostRef": "84000000600A098000A4B9D10030370B5D430109", "clusterRef": "85000000600A098000A4B9D1003637135D483DEB", "label": "beegfs_metadata2",
+ "hostTypeIndex": 28, "ports": [], "initiators": [{"initiatorRef": "89000000600A098000A4B28D00303CFC5D4300F7",
+ "nodeName": {"ioInterfaceType": "iscsi",
+ "iscsiNodeName": "iqn.used_elsewhere",
+ "remoteNodeWWN": None, "nvmeNodeName": None},
+ "alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_metadata2_iscsi_0",
+ "hostRef": "84000000600A098000A4B9D10030370B5D430109",
+ "id": "89000000600A098000A4B28D00303CFC5D4300F7"}],
+ "hostSidePorts": [{"type": "iscsi", "address": "iqn.used_elsewhere", "label": "beegfs_metadata2_iscsi_0"}],
+ "id": "84000000600A098000A4B9D10030370B5D430120", "name": "beegfs_metadata2"}]
+ HOST_GROUPS = [{"clusterRef": "85000000600A098000A4B9D1003637135D483DEB", "label": "test_group", "isSAControlled": False,
+ "confirmLUNMappingCreation": False, "protectionInformationCapableAccessMethod": True, "isLun0Restricted": False,
+ "id": "85000000600A098000A4B9D1003637135D483DEB", "name": "test_group"}]
+ HOST_TYPES = [{"name": "FactoryDefault", "index": 0, "code": "FactoryDefault"},
+ {"name": "Windows 2000/Server 2003/Server 2008 Non-Clustered", "index": 1, "code": "W2KNETNCL"},
+ {"name": "Solaris", "index": 2, "code": "SOL"},
+ {"name": "Linux", "index": 6, "code": "LNX"},
+ {"name": "LnxALUA", "index": 7, "code": "LnxALUA"},
+ {"name": "Windows 2000/Server 2003/Server 2008 Clustered", "index": 8, "code": "W2KNETCL"},
+ {"name": "LnxTPGSALUA_SF", "index": 27, "code": "LnxTPGSALUA_SF"},
+ {"name": "LnxDHALUA", "index": 28, "code": "LnxDHALUA"}]
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_host.NetAppESeriesHost.request'
+
+ def _set_args(self, args):
+ module_args = self.REQUIRED_PARAMS.copy()
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_host_exists_pass(self):
+ """Verify host_exists produces expected results."""
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'new_host', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'new_host_port_1', 'type': 'fc', 'port': '0x08ef08ef08ef08ef'}]})
+ host = NetAppESeriesHost()
+ self.assertFalse(host.host_exists)
+
+ self._set_args({'state': 'present', 'name': 'does_not_exist', 'host_type': 'linux dm-mp',
+ 'ports': [{'label': 'beegfs_storage1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ self.assertFalse(host.host_exists)
+
+ self._set_args({'state': 'present', 'name': 'beegfs_storage1', 'host_type': 'linux dm-mp',
+ 'ports': [{'label': 'beegfs_storage1_iscsi_0', 'type': 'iscsi', 'port': 'iqn.differentiqn.org'}]})
+ host = NetAppESeriesHost()
+ self.assertTrue(host.host_exists)
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ self.assertTrue(host.host_exists)
+
+ def test_host_exists_fail(self):
+ """Verify host_exists produces expected exceptions."""
+ self._set_args({'state': 'present', 'host_type': 'linux dm-mp', 'ports': [{'label': 'abc', 'type': 'iscsi', 'port': 'iqn:0'}]})
+ host = NetAppESeriesHost()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to determine host existence."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ exists = host.host_exists
+
+ def test_needs_update_pass(self):
+ """Verify needs_update produces expected results."""
+ # No changes
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp',
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertFalse(host.needs_update)
+
+ # Change host type
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+
+ # Add port to host
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+
+ # Change port name
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_2', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+
+ # take port from another host by force
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+
+ def test_needs_update_fail(self):
+ """Verify needs_update produces expected exceptions."""
+ with self.assertRaisesRegexp(AnsibleFailJson, "is associated with a different host."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ host.needs_update
+
+ def test_valid_host_type_pass(self):
+ """Validate the available host types."""
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.HOST_TYPES)):
+ self._set_args({'state': 'present', 'host_type': '0'})
+ host = NetAppESeriesHost()
+ self.assertTrue(host.valid_host_type)
+ self._set_args({'state': 'present', 'host_type': '28'})
+ host = NetAppESeriesHost()
+ self.assertTrue(host.valid_host_type)
+ self._set_args({'state': 'present', 'host_type': 'windows'})
+ host = NetAppESeriesHost()
+ self.assertTrue(host.valid_host_type)
+ self._set_args({'state': 'present', 'host_type': 'linux dm-mp'})
+ host = NetAppESeriesHost()
+ self.assertTrue(host.valid_host_type)
+
+ def test_valid_host_type_fail(self):
+        """Verify valid_host_type produces expected exceptions."""
+ with self.assertRaisesRegexp(AnsibleFailJson, "host_type must be either a host type name or host type index found integer the documentation"):
+ self._set_args({'state': 'present', 'host_type': 'non-host-type'})
+ host = NetAppESeriesHost()
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.HOST_TYPES)):
+ with self.assertRaisesRegexp(AnsibleFailJson, "There is no host type with index"):
+ self._set_args({'state': 'present', 'host_type': '4'})
+ host = NetAppESeriesHost()
+ valid = host.valid_host_type
+
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get host types."):
+ self._set_args({'state': 'present', 'host_type': '4'})
+ host = NetAppESeriesHost()
+ valid = host.valid_host_type
+
+ def test_assigned_host_ports_pass(self):
+ """Verify assigned_host_ports gives expected results."""
+
+ # Add an unused port to host
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ self.assertEquals(host.assigned_host_ports(), {})
+
+ # Change port name (force)
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_2', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ self.assertEquals(host.assigned_host_ports(), {'84000000600A098000A4B9D10030370B5D430109': ['89000000600A098000A4B28D00303CFC5D4300F7']})
+
+ # Change port type
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'fc', 'port': '08:ef:7e:24:52:a0'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ self.assertEquals(host.assigned_host_ports(), {})
+
+ # take port from another host by force
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ self.assertEquals(host.assigned_host_ports(), {'84000000600A098000A4B9D10030370B5D430109': ['89000000600A098000A4B28D00303CFC5D4300F7']})
+
+        # Take a port from another host by force and unassign it (apply_unassigning=True)
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS), (200, {})]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ self.assertEquals(host.assigned_host_ports(apply_unassigning=True),
+ {'84000000600A098000A4B9D10030370B5D430109': ['89000000600A098000A4B28D00303CFC5D4300F7']})
+
+ def test_assigned_host_ports_fail(self):
+ """Verify assigned_host_ports gives expected exceptions."""
+        # Attempt to relabel a port that is already assigned, without using force_port
+ with self.assertRaisesRegexp(AnsibleFailJson, "There are no host ports available OR there are not enough unassigned host ports"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_2', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.assigned_host_ports(apply_unassigning=True)
+
+ # take port from another host and fail because force == False
+ with self.assertRaisesRegexp(AnsibleFailJson, "There are no host ports available OR there are not enough unassigned host ports"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.assigned_host_ports(apply_unassigning=True)
+
+        # Attempt to take a port already assigned to another host for a new host and fail because force == False
+ with self.assertRaisesRegexp(AnsibleFailJson, "There are no host ports available OR there are not enough unassigned host ports"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata3', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ host.assigned_host_ports(apply_unassigning=True)
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to unassign host port."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS), Exception()]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
+ host = NetAppESeriesHost()
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.assigned_host_ports(apply_unassigning=True)
+
+ def test_update_host_pass(self):
+ """Verify update_host produces expected results."""
+ # Change host type
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ host.build_success_payload = lambda x: {}
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.update_host()
+
+ # Change port iqn
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
+ host = NetAppESeriesHost()
+ host.build_success_payload = lambda x: {}
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.update_host()
+
+ # Change port type to fc
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'fc', 'port': '0x08ef08ef08ef08ef'}]})
+ host = NetAppESeriesHost()
+ host.build_success_payload = lambda x: {}
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.update_host()
+
+ # Change port name
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_12', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ host.build_success_payload = lambda x: {}
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.update_host()
+
+ def test_update_host_fail(self):
+ """Verify update_host produces expected exceptions."""
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update host."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS), Exception()]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ host.build_success_payload = lambda x: {}
+ exists = host.host_exists
+ self.assertTrue(host.needs_update)
+ host.update_host()
+
+ def test_create_host_pass(self):
+ """Verify create_host produces expected results."""
+ def _assigned_host_ports(apply_unassigning=False):
+ return None
+
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, {'id': '84000000600A098000A4B9D10030370B5D430109'})):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ with mock.patch(self.REQ_FUNC, return_value=(200, [])):
+ host.assigned_host_ports = _assigned_host_ports
+ host.build_success_payload = lambda x: {}
+ host.create_host()
+
+ def test_create_host_fail(self):
+ """Verify create_host produces expected exceptions."""
+ def _assigned_host_ports(apply_unassigning=False):
+ return None
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create host."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, []), Exception()]):
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ host.assigned_host_ports = _assigned_host_ports
+ host.build_success_payload = lambda x: {}
+ host.create_host()
+
+ with self.assertRaisesRegexp(AnsibleExitJson, "Host already exists."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
+ self._set_args({'state': 'present', 'name': 'beegfs_storage1', 'host_type': 'linux dm-mp', 'force_port': True,
+ 'ports': [{'label': 'beegfs_storage1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ host.assigned_host_ports = _assigned_host_ports
+ host.build_success_payload = lambda x: {}
+ host.create_host()
+
+ def test_remove_host_pass(self):
+ """Verify remove_host produces expected results."""
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ self._set_args({'state': 'absent', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ host.host_obj = {"id": "84000000600A098000A4B9D10030370B5D430109"}
+ host.remove_host()
+
+ def test_remove_host_fail(self):
+ """Verify remove_host produces expected exceptions."""
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to remove host."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ self._set_args({'state': 'absent', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
+ 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
+ host = NetAppESeriesHost()
+ host.host_obj = {"id": "84000000600A098000A4B9D10030370B5D430109"}
+ host.remove_host()
+
+ def test_build_success_payload(self):
+ """Validate success payload."""
+ def _assigned_host_ports(apply_unassigning=False):
+ return None
+
+ self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
+ 'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
+ host = NetAppESeriesHost()
+ self.assertEquals(host.build_success_payload(), {'api_url': 'http://localhost/', 'ssid': '1'})
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_hostgroup.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_hostgroup.py
new file mode 100644
index 000000000..6cecf0e8c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_hostgroup.py
@@ -0,0 +1,140 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_hostgroup import NetAppESeriesHostGroup
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class HostTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1"}
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_hostgroup.NetAppESeriesHostGroup.request"
+ HOSTS_GET_RESPONSE = [
+ {"hostRef": "84000000600A098000A4B28D0030102E5C3DFC0F",
+ "clusterRef": "85000000600A098000A4B28D0036102C5C3DFC08", "id": "84000000600A098000A4B28D0030102E5C3DFC0F",
+ "name": "host1"},
+ {"hostRef": "84000000600A098000A4B28D003010315C3DFC11",
+ "clusterRef": "85000000600A098000A4B9D100360F765C3DFC1C", "id": "84000000600A098000A4B28D003010315C3DFC11",
+ "name": "host2"},
+ {"hostRef": "84000000600A098000A4B28D003010345C3DFC14",
+ "clusterRef": "85000000600A098000A4B9D100360F765C3DFC1C", "id": "84000000600A098000A4B28D003010345C3DFC14",
+ "name": "host3"}]
+ HOSTGROUPS_GET_RESPONSE = [
+ {"clusterRef": "85000000600A098000A4B28D0036102C5C3DFC08", "id": "85000000600A098000A4B28D0036102C5C3DFC08",
+ "name": "group1"},
+ {"clusterRef": "85000000600A098000A4B9D100360F765C3DFC1C", "id": "85000000600A098000A4B9D100360F765C3DFC1C",
+ "name": "group2"},
+ {"clusterRef": "85000000600A098000A4B9D100360F775C3DFC1E", "id": "85000000600A098000A4B9D100360F775C3DFC1E",
+ "name": "group3"}]
+
+ def _set_args(self, args):
+ self.module_args = self.REQUIRED_PARAMS.copy()
+ self.module_args.update(args)
+ set_module_args(self.module_args)
+
+ def test_hosts_fail(self):
+        """Ensure that the hosts property method fails when self.request throws an exception."""
+ self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with self.assertRaises(AnsibleFailJson):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ hosts = hostgroup_object.hosts
+
+ self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with mock.patch(self.REQ_FUNC, return_value=(200, [])):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Expected host does not exist"):
+ hosts = hostgroup_object.hosts
+
+ def test_hosts_pass(self):
+ """Evaluate hosts property method for valid returned data structure."""
+ expected_host_list = ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D0030102E5C3DFC0F']
+ for hostgroup_hosts in [["host1", "host2"], ["84000000600A098000A4B28D0030102E5C3DFC0F",
+ "84000000600A098000A4B28D003010315C3DFC11"]]:
+ self._set_args({"state": "present", "name": "hostgroup1", "hosts": hostgroup_hosts})
+ hostgroup_object = NetAppESeriesHostGroup()
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.HOSTS_GET_RESPONSE)):
+ for item in hostgroup_object.hosts:
+ self.assertTrue(item in expected_host_list)
+
+ # Create hostgroup with no hosts
+ self._set_args({"state": "present", "name": "hostgroup1"})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with mock.patch(self.REQ_FUNC, return_value=(200, [])):
+ self.assertEqual(hostgroup_object.hosts, [])
+
+ def test_host_groups_fail(self):
+ """Ensure that the host_groups property method fails when self.request throws an exception."""
+ self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with self.assertRaises(AnsibleFailJson):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ host_groups = hostgroup_object.host_groups
+
+ def test_host_groups_pass(self):
+ """Evaluate host_groups property method for valid return data structure."""
+ expected_groups = [
+ {'hosts': ['84000000600A098000A4B28D0030102E5C3DFC0F'], 'id': '85000000600A098000A4B28D0036102C5C3DFC08',
+ 'name': 'group1'},
+ {'hosts': ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D003010345C3DFC14'],
+ 'id': '85000000600A098000A4B9D100360F765C3DFC1C', 'name': 'group2'},
+ {'hosts': [], 'id': '85000000600A098000A4B9D100360F775C3DFC1E', 'name': 'group3'}]
+
+ self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+
+ with mock.patch(self.REQ_FUNC,
+ side_effect=[(200, self.HOSTGROUPS_GET_RESPONSE), (200, self.HOSTS_GET_RESPONSE)]):
+ self.assertEqual(hostgroup_object.host_groups, expected_groups)
+
+ @mock.patch.object(NetAppESeriesHostGroup, "host_groups")
+ @mock.patch.object(NetAppESeriesHostGroup, "hosts")
+ @mock.patch.object(NetAppESeriesHostGroup, "create_host_group")
+ @mock.patch.object(NetAppESeriesHostGroup, "update_host_group")
+ @mock.patch.object(NetAppESeriesHostGroup, "delete_host_group")
+ def test_apply_pass(self, fake_delete_host_group, fake_update_host_group, fake_create_host_group, fake_hosts,
+ fake_host_groups):
+ """Apply desired host group state to the storage array."""
+ hosts_response = ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D0030102E5C3DFC0F']
+ host_groups_response = [
+ {'hosts': ['84000000600A098000A4B28D0030102E5C3DFC0F'], 'id': '85000000600A098000A4B28D0036102C5C3DFC08',
+ 'name': 'group1'},
+ {'hosts': ['84000000600A098000A4B28D003010315C3DFC11', '84000000600A098000A4B28D003010345C3DFC14'],
+ 'id': '85000000600A098000A4B9D100360F765C3DFC1C', 'name': 'group2'},
+ {'hosts': [], 'id': '85000000600A098000A4B9D100360F775C3DFC1E', 'name': 'group3'}]
+
+ fake_host_groups.return_value = host_groups_response
+ fake_hosts.return_value = hosts_response
+ fake_create_host_group.return_value = lambda x: "Host group created!"
+ fake_update_host_group.return_value = lambda x: "Host group updated!"
+ fake_delete_host_group.return_value = lambda x: "Host group deleted!"
+
+ # Test create new host group
+ self._set_args({"state": "present", "name": "hostgroup1", "hosts": ["host1", "host2"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with self.assertRaises(AnsibleExitJson):
+ hostgroup_object.apply()
+
+ # Test make no changes to existing host group
+ self._set_args({"state": "present", "name": "group1", "hosts": ["host1"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with self.assertRaises(AnsibleExitJson):
+ hostgroup_object.apply()
+
+ # Test add host to existing host group
+ self._set_args({"state": "present", "name": "group1", "hosts": ["host1", "host2"]})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with self.assertRaises(AnsibleExitJson):
+ hostgroup_object.apply()
+
+ # Test delete existing host group
+ self._set_args({"state": "absent", "name": "group1"})
+ hostgroup_object = NetAppESeriesHostGroup()
+ with self.assertRaises(AnsibleExitJson):
+ hostgroup_object.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ib_iser_interface.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ib_iser_interface.py
new file mode 100644
index 000000000..d2eca39f2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ib_iser_interface.py
@@ -0,0 +1,159 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_ib_iser_interface import NetAppESeriesIbIserInterface
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class IbIserInterfaceTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1",
+ "controller": "A",
+ "channel": 1}
+
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_ib_iser_interface.NetAppESeriesIbIserInterface.request"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_invalid_options_fail(self):
+ """Verify invalid options fail."""
+ options_list = [{"address": "nonaddress@somewhere.com"},
+ {"address": "192.168.100.1000"},
+ {"address": "1192.168.100.100"}]
+
+ for options in options_list:
+ self._set_args(options)
+ with self.assertRaisesRegexp(AnsibleFailJson, "An invalid ip address was provided for address."):
+ iface = NetAppESeriesIbIserInterface()
+
+ def test_get_interfaces_pass(self):
+ """Verify get_interfaces method passes."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"interfaceType": "iscsi", "iscsi": {"interfaceData": {"type": "infiniband",
+ "infinibandData": {"isIser": True}}}},
+ {"interfaceType": "iscsi", "iscsi": {"interfaceData": {"type": "infiniband",
+ "infinibandData": {"isIser": True}}}},
+ {"interfaceType": "fc", "fc": {}}])):
+ self.assertEquals(iface.get_interfaces(),
+ [{'interfaceType': 'iscsi', 'iscsi': {'interfaceData': {'type': 'infiniband', 'infinibandData': {'isIser': True}}}},
+ {'interfaceType': 'iscsi', 'iscsi': {'interfaceData': {'type': 'infiniband', 'infinibandData': {'isIser': True}}}}])
+
+ def test_get_interfaces_fails(self):
+ """Verify get_interfaces method throws expected exceptions."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve defined host interfaces."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ iface.get_interfaces()
+
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to detect any InfiniBand iSER interfaces!"):
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"interfaceType": "eth", "eth": {"interfaceData": {"type": "ethernet",
+ "infinibandData": {"isIser": False}}}},
+ {"interfaceType": "iscsi", "iscsi": {"interfaceData": {"type": "infiniband",
+ "infinibandData": {"isIser": False}}}},
+ {"interfaceType": "fc", "fc": {}}])):
+ iface.get_interfaces()
+
+ def test_get_ib_link_status_pass(self):
+ """Verify expected data structure."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"ibPorts": [{"channelPortRef": 1, "linkState": "active"},
+ {"channelPortRef": 2, "linkState": "down"},
+ {"channelPortRef": 3, "linkState": "down"},
+ {"channelPortRef": 4, "linkState": "active"}]})):
+ self.assertEquals(iface.get_ib_link_status(), {1: 'active', 2: 'down', 3: 'down', 4: 'active'})
+
+ def test_get_ib_link_status_fail(self):
+ """Verify expected exception is thrown."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve ib link status information!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ statuses = iface.get_ib_link_status()
+
+ def test_is_change_required_pass(self):
+ """Verify is_change_required method returns expected values."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.get_target_interface = lambda: {"iscsi": {"ipv4Data": {"ipv4AddressData": {"ipv4Address": "192.168.1.1"}}}}
+ self.assertTrue(iface.is_change_required())
+
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.get_target_interface = lambda: {"iscsi": {"ipv4Data": {"ipv4AddressData": {"ipv4Address": "192.168.100.100"}}}}
+ self.assertFalse(iface.is_change_required())
+
+ def test_make_request_body_pass(self):
+ """Verify expected request body."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.get_target_interface = lambda: {"iscsi": {"id": "1234", "ipv4Data": {"ipv4AddressData": {"ipv4Address": "192.168.1.1"}}}}
+ self.assertEquals(iface.make_request_body(), {"iscsiInterface": "1234",
+ "settings": {"tcpListenPort": [],
+ "ipv4Address": ["192.168.100.100"],
+ "ipv4SubnetMask": [],
+ "ipv4GatewayAddress": [],
+ "ipv4AddressConfigMethod": [],
+ "maximumFramePayloadSize": [],
+ "ipv4VlanId": [],
+ "ipv4OutboundPacketPriority": [],
+ "ipv4Enabled": [],
+ "ipv6Enabled": [],
+ "ipv6LocalAddresses": [],
+ "ipv6RoutableAddresses": [],
+ "ipv6PortRouterAddress": [],
+ "ipv6AddressConfigMethod": [],
+ "ipv6OutboundPacketPriority": [],
+ "ipv6VlanId": [],
+ "ipv6HopLimit": [],
+ "ipv6NdReachableTime": [],
+ "ipv6NdRetransmitTime": [],
+ "ipv6NdStaleTimeout": [],
+ "ipv6DuplicateAddressDetectionAttempts": [],
+ "maximumInterfaceSpeed": []}})
+
+ def test_update_pass(self):
+ """Verify update method behavior."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.is_change_required = lambda: False
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes were required."):
+ iface.update()
+
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.is_change_required = lambda: True
+ iface.check_mode = True
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes were required."):
+ iface.update()
+
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.is_change_required = lambda: True
+ iface.make_request_body = lambda: {}
+ with self.assertRaisesRegexp(AnsibleExitJson, "The interface settings have been updated."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ iface.update()
+
+ def test_update_fail(self):
+ """Verify exceptions are thrown."""
+ self._set_args({"address": "192.168.100.100"})
+ iface = NetAppESeriesIbIserInterface()
+ iface.is_change_required = lambda: True
+ iface.make_request_body = lambda: {}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to modify the interface!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ iface.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_interface.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_interface.py
new file mode 100644
index 000000000..de9617e6d
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_interface.py
@@ -0,0 +1,239 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_iscsi_interface import NetAppESeriesIscsiInterface
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class IscsiInterfaceTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ 'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1',
+ 'state': 'disabled',
+ 'port': 1,
+ 'controller': 'A',
+ }
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_iscsi_interface.NetAppESeriesIscsiInterface.request'
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_validate_params(self):
+ """Ensure we can pass valid parameters to the module"""
+ # Provide a range of valid values for each
+ for controller in ['A', 'B']:
+ for i in range(1, 10):
+ for mtu in [1500, 2500, 9000]:
+ self._set_args(dict(
+ state='disabled',
+ port=i,
+ controller=controller,
+ mtu=mtu,
+ ))
+ iface = NetAppESeriesIscsiInterface()
+
+ def test_invalid_params(self):
+ """Ensure that our input validation catches invalid parameters"""
+
+ # Currently a 'C' controller is invalid
+ self._set_args(dict(
+ state='disabled',
+ port=1,
+ controller="C",
+ ))
+ with self.assertRaises(AnsibleFailJson) as result:
+ iface = NetAppESeriesIscsiInterface()
+
+ # Each of these mtu values are invalid
+ for mtu in [500, 1499, 9001]:
+ self._set_args({
+ 'state': 'disabled',
+ 'port': 1,
+ 'controller': 'A',
+ 'mtu': mtu
+ })
+ with self.assertRaises(AnsibleFailJson) as result:
+ iface = NetAppESeriesIscsiInterface()
+
+ def test_interfaces(self):
+ """Validate that we are processing the interface list properly"""
+ self._set_args()
+ interfaces = [{"interfaceType": "iscsi", "iscsi": {"interfaceData": {"type": "ethernet"}}},
+ {"interfaceType": "iscsi", "iscsi": {"interfaceData": {"type": "ethernet"}}},
+ {"interfaceType": "fc", "iscsi": {"interfaceData": {"type": "ethernet"}}}]
+
+ # Ensure we filter out anything without an interfaceType of iscsi
+ expected = [iface for iface in interfaces if iface['interfaceType'] == 'iscsi']
+
+ # We expect a single call to the API: retrieve the list of interfaces from the objectGraph.
+ with mock.patch(self.REQ_FUNC, return_value=(200, interfaces)):
+ iface = NetAppESeriesIscsiInterface()
+ interfaces = iface.interfaces
+ self.assertEquals(interfaces, expected)
+
+ def test_interfaces_fail(self):
+ """Ensure we fail gracefully on an error to retrieve the interfaces"""
+ self._set_args()
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ # Simulate a failed call to the API
+ with mock.patch(self.REQ_FUNC, side_effect=Exception("Failure")):
+ iface = NetAppESeriesIscsiInterface()
+ interfaces = iface.interfaces
+
+ def test_get_target_interface_bad_port(self):
+ """Ensure we fail correctly when a bad port is provided"""
+ self._set_args()
+
+ interfaces = [{"iscsi": {"port": 1, "controllerId": "1"}}]
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Invalid controller.*?iSCSI port."):
+ with mock.patch.object(NetAppESeriesIscsiInterface, 'interfaces', return_value=interfaces):
+ iface = NetAppESeriesIscsiInterface()
+ interfaces = iface.get_target_interface()
+
+ def test_make_update_body_dhcp(self):
+ """Ensure the update body generates correctly for a transition from static to dhcp"""
+ self._set_args(dict(state='enabled',
+ config_method='dhcp')
+ )
+
+ iface = {"iscsi": {"id": 1,
+ "ipv4Enabled": False,
+ "ipv4Data": {"ipv4AddressData": {"ipv4Address": "0.0.0.0",
+ "ipv4SubnetMask": "0.0.0.0",
+ "ipv4GatewayAddress": "0.0.0.0"},
+ "ipv4AddressConfigMethod": "configStatic"},
+ "interfaceData": {"ethernetData": {"maximumFramePayloadSize": 1500}}}}
+
+ # Test a transition from static to dhcp
+ inst = NetAppESeriesIscsiInterface()
+ update, body = inst.make_update_body(iface)
+ self.assertTrue(update, msg="An update was expected!")
+ self.assertEquals(body['settings']['ipv4Enabled'][0], True)
+ self.assertEquals(body['settings']['ipv4AddressConfigMethod'][0], 'configDhcp')
+
+ def test_make_update_body_static(self):
+ """Ensure the update body generates correctly for a transition from dhcp to static"""
+ iface = {"iscsi": {"id": 1,
+ "ipv4Enabled": False,
+ "ipv4Data": {"ipv4AddressData": {"ipv4Address": "0.0.0.0",
+ "ipv4SubnetMask": "0.0.0.0",
+ "ipv4GatewayAddress": "0.0.0.0"},
+ "ipv4AddressConfigMethod": "configDhcp"},
+ "interfaceData": {"ethernetData": {"maximumFramePayloadSize": 1500}}}}
+
+ self._set_args(dict(state='enabled',
+ config_method='static',
+ address='10.10.10.10',
+ subnet_mask='255.255.255.0',
+ gateway='1.1.1.1'))
+
+ inst = NetAppESeriesIscsiInterface()
+ update, body = inst.make_update_body(iface)
+ self.assertTrue(update, msg="An update was expected!")
+ self.assertEquals(body['settings']['ipv4Enabled'][0], True)
+ self.assertEquals(body['settings']['ipv4AddressConfigMethod'][0], 'configStatic')
+ self.assertEquals(body['settings']['ipv4Address'][0], '10.10.10.10')
+ self.assertEquals(body['settings']['ipv4SubnetMask'][0], '255.255.255.0')
+ self.assertEquals(body['settings']['ipv4GatewayAddress'][0], '1.1.1.1')
+
+ CONTROLLERS = dict(A='1', B='2')
+
+ def test_update_bad_controller(self):
+ """Ensure a bad controller fails gracefully"""
+ self._set_args(dict(controller='B'))
+
+ inst = NetAppESeriesIscsiInterface()
+ with self.assertRaises(AnsibleFailJson) as result:
+ with mock.patch.object(inst, 'get_controllers', return_value=dict(A='1')) as get_controllers:
+ inst.update()
+
+ @mock.patch.object(NetAppESeriesIscsiInterface, 'get_controllers', return_value=CONTROLLERS)
+ def test_update(self, get_controllers):
+ """Validate the good path"""
+ self._set_args()
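+ # get_target_interface is given a two-element side_effect to satisfy both lookups made during update().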
+
+ inst = NetAppESeriesIscsiInterface()
+ with self.assertRaises(AnsibleExitJson):
+ with mock.patch(self.REQ_FUNC, return_value=(200, "")) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ with mock.patch.object(inst, 'make_update_body', return_value=(True, {})):
+ inst.update()
+ request.assert_called_once()
+
+ @mock.patch.object(NetAppESeriesIscsiInterface, 'get_controllers', return_value=CONTROLLERS)
+ def test_update_not_required(self, get_controllers):
+ """Ensure we don't trigger the update if one isn't required or if check mode is enabled"""
+ self._set_args()
+
+ # make_update_body will report that no change is required, so we should see no call to the API.
+ inst = NetAppESeriesIscsiInterface()
+ with self.assertRaises(AnsibleExitJson) as result:
+ with mock.patch(self.REQ_FUNC, return_value=(200, "")) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ with mock.patch.object(inst, 'make_update_body', return_value=(False, {})):
+ inst.update()
+ request.assert_not_called()
+ self.assertFalse(result.exception.args[0]['changed'], msg="No change was expected.")
+
+ # Since check_mode is enabled, we will run everything normally, but not make a request to the API
+ # to perform the actual change.
+ inst = NetAppESeriesIscsiInterface()
+ inst.check_mode = True
+ with self.assertRaises(AnsibleExitJson) as result:
+ with mock.patch(self.REQ_FUNC, return_value=(200, "")) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ with mock.patch.object(inst, 'make_update_body', return_value=(True, {})):
+ inst.update()
+ request.assert_not_called()
+ self.assertTrue(result.exception.args[0]['changed'], msg="A change was expected.")
+
+ @mock.patch.object(NetAppESeriesIscsiInterface, 'get_controllers', return_value=CONTROLLERS)
+ def test_update_fail_busy(self, get_controllers):
+ """Ensure we fail correctly on receiving a busy response from the API."""
+ self._set_args()
+
+ inst = NetAppESeriesIscsiInterface()
+ with self.assertRaisesRegexp(AnsibleFailJson, r".*?busy.*") as result:
+ with mock.patch(self.REQ_FUNC, return_value=(422, dict(retcode="3"))) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ with mock.patch.object(inst, 'make_update_body', return_value=(True, {})):
+ inst.update()
+ request.assert_called_once()
+
+ @mock.patch.object(NetAppESeriesIscsiInterface, 'get_controllers', return_value=CONTROLLERS)
+ @mock.patch.object(NetAppESeriesIscsiInterface, 'make_update_body', return_value=(True, {}))
+ def test_update_fail(self, get_controllers, make_body):
+ """Ensure we fail correctly on receiving a normal failure from the API."""
+ self._set_args()
+
+ inst = NetAppESeriesIscsiInterface()
+ # Test a 422 error with a non-busy status
+ with self.assertRaisesRegexp(AnsibleFailJson, r".*?Failed to modify.*") as result:
+ with mock.patch(self.REQ_FUNC, return_value=(422, mock.MagicMock())) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ inst.update()
+ request.assert_called_once()
+
+ # Test a 401 (authentication) error
+ with self.assertRaisesRegexp(AnsibleFailJson, r".*?Failed to modify.*") as result:
+ with mock.patch(self.REQ_FUNC, return_value=(401, mock.MagicMock())) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ inst.update()
+ request.assert_called_once()
+
+ # Test with a connection failure
+ with self.assertRaisesRegexp(AnsibleFailJson, r".*?Connection failure.*") as result:
+ with mock.patch(self.REQ_FUNC, side_effect=Exception()) as request:
+ with mock.patch.object(inst, 'get_target_interface', side_effect=[{}, mock.MagicMock()]):
+ inst.update()
+ request.assert_called_once()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_target.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_target.py
new file mode 100644
index 000000000..93ccafe47
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_iscsi_target.py
@@ -0,0 +1,188 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_iscsi_target import NetAppESeriesIscsiTarget
+from units.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class IscsiTargetTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "admin", "api_password": "adminpassword", "api_url": "http://localhost", "ssid": "1", "name": "abc"}
+ CHAP_SAMPLE = "a" * 14
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_iscsi_target.NetAppESeriesIscsiTarget.request"
+ TARGET_REQUEST_RESPONSE = [{"targetRef": "90000000600A098000A4B28D00334A065DA9D747",
+ "nodeName": {"ioInterfaceType": "iscsi",
+ "iscsiNodeName": "iqn.1992-08.com.netapp:2806.600a098000a4b28d000000005da9d744",
+ "remoteNodeWWN": None, "nvmeNodeName": None},
+ "alias": {"ioInterfaceType": "iscsi",
+ "iscsiAlias": "target_name"},
+ "configuredAuthMethods": {"authMethodData": [{"authMethod": "none",
+ "chapSecret": None}]},
+ "portals": [{"groupTag": 2,
+ "ipAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.10.10.110",
+ "ipv6Address": None},
+ "tcpListenPort": 3260},
+ {"groupTag": 2,
+ "ipAddress": {"addressType": "ipv6",
+ "ipv4Address": None,
+ "ipv6Address": "FE8000000000000002A098FFFEA4B9D7"},
+ "tcpListenPort": 3260},
+ {"groupTag": 2,
+ "ipAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.10.10.112",
+ "ipv6Address": None},
+ "tcpListenPort": 3260},
+ {"groupTag": 1, "ipAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.10.11.110",
+ "ipv6Address": None},
+ "tcpListenPort": 3260},
+ {"groupTag": 1,
+ "ipAddress": {"addressType": "ipv6",
+ "ipv4Address": None,
+ "ipv6Address": "FE8000000000000002A098FFFEA4B293"},
+ "tcpListenPort": 3260},
+ {"groupTag": 1,
+ "ipAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.10.11.112",
+ "ipv6Address": None},
+ "tcpListenPort": 3260}]}]
+ ISCSI_ENTRY_DATA_RESPONSE = [{"icmpPingResponseEnabled": False,
+ "unnamedDiscoverySessionsEnabled": False,
+ "isnsServerTcpListenPort": 0,
+ "ipv4IsnsServerAddressConfigMethod": "configDhcp",
+ "ipv4IsnsServerAddress": "0.0.0.0",
+ "ipv6IsnsServerAddressConfigMethod": "configStatic",
+ "ipv6IsnsServerAddress": "00000000000000000000000000000000",
+ "isnsRegistrationState": "__UNDEFINED",
+ "isnsServerRegistrationEnabled": False,
+ "hostPortsConfiguredDHCP": False}]
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_validate_params(self):
+ """Ensure we can pass valid parameters to the module"""
+ for i in range(12, 57):
+ secret = 'a' * i
+ self._set_args(dict(chap=secret))
+ tgt = NetAppESeriesIscsiTarget()
+
+ def test_invalid_chap_secret(self):
+ for secret in [11 * 'a', 58 * 'a']:
+ with self.assertRaisesRegexp(AnsibleFailJson, r'.*?CHAP secret is not valid.*') as result:
+ self._set_args(dict(chap=secret))
+ tgt = NetAppESeriesIscsiTarget()
+
+ def test_target_pass(self):
+ """Ensure target property returns the expected data structure."""
+ expected_response = {"alias": "target_name", "chap": False, "iqn": "iqn.1992-08.com.netapp:2806.600a098000a4b28d000000005da9d744",
+ "ping": False, "unnamed_discovery": False}
+
+ self._set_args({"name": "target_name", "ping": True, "unnamed_discovery": True})
+ iscsi_target = NetAppESeriesIscsiTarget()
+
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE)]):
+ self.assertEquals(iscsi_target.target, expected_response)
+
+ def test_target_fail(self):
+ """Ensure target property returns the expected data structure."""
+ self._set_args({"name": "target_name", "ping": True, "unnamed_discovery": True})
+ iscsi_target = NetAppESeriesIscsiTarget()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the iSCSI target information."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ result = iscsi_target.target
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the iSCSI target information."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), Exception()]):
+ result = iscsi_target.target
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"This storage-system does not appear to have iSCSI interfaces."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, [])):
+ result = iscsi_target.target
+
+ def test_apply_iscsi_settings_pass(self):
+ """Ensure apply_iscsi_settings succeeds properly."""
+ self._set_args({"name": "not_target_name"})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), (200, [])]):
+ self.assertTrue(iscsi_target.apply_iscsi_settings())
+
+ self._set_args({"name": "target_name"})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), (200, [])]):
+ self.assertFalse(iscsi_target.apply_iscsi_settings())
+
+ def test_apply_iscsi_settings_fail(self):
+ """Ensure apply_iscsi_settings fails properly."""
+ self._set_args({"name": "not_target_name"})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update the iSCSI target settings."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), Exception()]):
+ self.assertTrue(iscsi_target.apply_iscsi_settings())
+
+ def test_apply_target_changes_pass(self):
+ """Ensure apply_iscsi_settings succeeds properly."""
+ self._set_args({"name": "target_name", "ping": True, "unnamed_discovery": True})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), (200, [])]):
+ self.assertTrue(iscsi_target.apply_target_changes())
+
+ self._set_args({"name": "target_name", "ping": False, "unnamed_discovery": True})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), (200, [])]):
+ self.assertTrue(iscsi_target.apply_target_changes())
+
+ self._set_args({"name": "target_name", "ping": True, "unnamed_discovery": False})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), (200, [])]):
+ self.assertTrue(iscsi_target.apply_target_changes())
+
+ self._set_args({"name": "target_name", "ping": False, "unnamed_discovery": False})
+ iscsi_target = NetAppESeriesIscsiTarget()
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), (200, [])]):
+ self.assertFalse(iscsi_target.apply_target_changes())
+
+ def test_apply_target_changes_fail(self):
+ """Ensure apply_iscsi_settings fails properly."""
+ self._set_args({"name": "target_name", "ping": True, "unnamed_discovery": True})
+ iscsi_target = NetAppESeriesIscsiTarget()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update the iSCSI target settings."):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE), Exception()]):
+ iscsi_target.apply_target_changes()
+
+ def test_update_pass(self):
+ """Ensure update successfully exists."""
+ self._set_args({"name": "target_name", "ping": True, "unnamed_discovery": True})
+ iscsi_target = NetAppESeriesIscsiTarget()
+
+ iscsi_target.apply_iscsi_settings = lambda: True
+ iscsi_target.apply_target_changes = lambda: True
+ with self.assertRaisesRegexp(AnsibleExitJson, r"\'changed\': True"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE)]):
+ iscsi_target.update()
+
+ iscsi_target.apply_iscsi_settings = lambda: False
+ iscsi_target.apply_target_changes = lambda: True
+ with self.assertRaisesRegexp(AnsibleExitJson, r"\'changed\': True"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE)]):
+ iscsi_target.update()
+
+ iscsi_target.apply_iscsi_settings = lambda: True
+ iscsi_target.apply_target_changes = lambda: False
+ with self.assertRaisesRegexp(AnsibleExitJson, r"\'changed\': True"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE)]):
+ iscsi_target.update()
+
+ iscsi_target.apply_iscsi_settings = lambda: False
+ iscsi_target.apply_target_changes = lambda: False
+ with self.assertRaisesRegexp(AnsibleExitJson, r"\'changed\': False"):
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, self.TARGET_REQUEST_RESPONSE), (200, self.ISCSI_ENTRY_DATA_RESPONSE)]):
+ iscsi_target.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ldap.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ldap.py
new file mode 100644
index 000000000..69bf26742
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_ldap.py
@@ -0,0 +1,371 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_ldap import NetAppESeriesLdap
+from units.modules.utils import ModuleTestCase, set_module_args, AnsibleFailJson, AnsibleExitJson
+from units.compat import mock
+
+
+class LdapTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ "api_username": "admin",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1"}
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_ldap.NetAppESeriesLdap.request"
+ BASE_REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.request"
+
+ GET_DOMAINS = {"version": "3",
+ "ldapDomains": [{"id": "test1",
+ "bindLookupUser": {"password": "***", "user": "CN=cn,OU=accounts,DC=test1,DC=example,DC=com"},
+ "groupAttributes": ["memberOf"],
+ "ldapUrl": "ldap://test.example.com:389",
+ "names": ["test.example.com"],
+ "roleMapCollection": [{"groupRegex": ".*", "ignoreCase": False, "name": "storage.monitor"}],
+ "searchBase": "OU=accounts,DC=test,DC=example,DC=com",
+ "userAttribute": "sAMAccountName"},
+ {"id": "test2",
+ "bindLookupUser": {"password": "***", "user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com"},
+ "groupAttributes": ["memberOf"],
+ "ldapUrl": "ldap://test2.example.com:389",
+ "names": ["test2.example.com"],
+ "roleMapCollection": [{"groupRegex": ".*", "ignoreCase": False, "name": "storage.admin"},
+ {"groupRegex": ".*", "ignoreCase": False, "name": "support.admin"},
+ {"groupRegex": ".*", "ignoreCase": False, "name": "security.admin"},
+ {"groupRegex": ".*", "ignoreCase": False, "name": "storage.monitor"}],
+ "searchBase": "OU=accounts,DC=test2,DC=example,DC=com",
+ "userAttribute": "sAMAccountName"}]}
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_valid_options_pass(self):
+ """Verify valid options."""
+ options_list = [{"state": "disabled"},
+ {"state": "absent", "identifier": "test_domain"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass",
+ "names": ["name1", "name2"], "group_attributes": ["group_attr1", "group_attr1"], "user_attribute": "user_attr"}]
+
+ for options in options_list:
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args(options)
+ ldap = NetAppESeriesLdap()
+ for options in options_list:
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": False})]):
+ self._set_args(options)
+ ldap = NetAppESeriesLdap()
+
+ def test_get_domain_pass(self):
+ """Verify get_domain returns expected data structure."""
+ options = {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass",
+ "names": ["name1", "name2"], "group_attributes": ["group_attr1", "group_attr1"], "user_attribute": "user_attr"}
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.GET_DOMAINS)):
+ self._set_args(options)
+ ldap = NetAppESeriesLdap()
+ self.assertEquals(ldap.get_domains(), self.GET_DOMAINS["ldapDomains"])
+
+ def test_get_domain_fail(self):
+ """Verify get_domain throws expected exceptions."""
+ options = {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass",
+ "names": ["name1", "name2"], "group_attributes": ["group_attr1", "group_attr1"], "user_attribute": "user_attr"}
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve current LDAP configuration."):
+ self._set_args(options)
+ ldap = NetAppESeriesLdap()
+ ldap.get_domains()
+
+ def test_build_request_body_pass(self):
+ """Verify build_request_body builds expected data structure."""
+ options_list = [{"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass",
+ "names": ["name1", "name2"], "group_attributes": ["group_attr1", "group_attr1"], "user_attribute": "user_attr"}]
+ expectation_list = [{'id': 'test_domain', 'groupAttributes': ['memberOf'], 'ldapUrl': 'ldap://test.example.com:389', 'names': ['test.example.com'],
+ 'roleMapCollection': [], 'searchBase': 'ou=accounts,DC=test,DC=example,DC=com', 'userAttribute': 'sAMAccountName'},
+ {'id': 'test_domain', 'groupAttributes': ['memberOf'], 'ldapUrl': 'ldap://test.example.com:389', 'names': ['test.example.com'],
+ 'roleMapCollection': [], 'searchBase': 'ou=accounts,DC=test,DC=example,DC=com', 'userAttribute': 'sAMAccountName',
+ 'bindLookupUser': {'password': 'adminpass', 'user': 'admin'}},
+ {'id': 'test_domain', 'groupAttributes': ['group_attr1', 'group_attr1'], 'ldapUrl': 'ldap://test.example.com:389',
+ 'names': ['name1', 'name2'], 'roleMapCollection': [], 'searchBase': 'ou=accounts,DC=test,DC=example,DC=com',
+ 'userAttribute': 'user_attr', 'bindLookupUser': {'password': 'adminpass', 'user': 'admin'}}]
+ for index in range(len(options_list)):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args(options_list[index])
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ self.assertEquals(ldap.body, expectation_list[index])
+
+ def test_are_changes_required_pass(self):
+ """Verify build_request_body builds expected data structure."""
+ options_list = [{"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass"},
+ {"state": "present", "identifier": "test_domain", "server_url": "ldap://test.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com", "bind_user": "admin", "bind_password": "adminpass",
+ "names": ["name1", "name2"], "group_attributes": ["group_attr1", "group_attr1"], "user_attribute": "user_attr"}]
+
+ for index in range(len(options_list)):
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args(options_list[index])
+ ldap = NetAppESeriesLdap()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
+ self.assertTrue(ldap.are_changes_required())
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args({"state": "disabled"})
+ ldap = NetAppESeriesLdap()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
+ self.assertTrue(ldap.are_changes_required())
+ self.assertEquals(ldap.existing_domain_ids, ["test1", "test2"])
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args({"state": "absent", "identifier": "test_domain"})
+ ldap = NetAppESeriesLdap()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
+ self.assertFalse(ldap.are_changes_required())
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test2,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
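+ # add_domain is stubbed to stand in for the temporary domain the module creates when validating
+ # bind credentials (see the mocked authenticationTestResult responses below).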
+ ldap.add_domain = lambda temporary, skip_test: {"id": "ANSIBLE_TMP_DOMAIN"}
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"id": "test2", "result": {"authenticationTestResult": "ok"}},
+ {"id": "ANSIBLE_TMP_DOMAIN", "result": {"authenticationTestResult": "ok"}}])):
+ self.assertFalse(ldap.are_changes_required())
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
+ ldap.add_domain = lambda temporary, skip_test: {"id": "ANSIBLE_TMP_DOMAIN"}
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"id": "test2", "result": {"authenticationTestResult": "fail"}},
+ {"id": "ANSIBLE_TMP_DOMAIN", "result": {"authenticationTestResult": "ok"}}])):
+ self.assertTrue(ldap.are_changes_required())
+
+ def test_are_changes_required_fail(self):
+ """Verify are_changes_required throws expected exception."""
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test2,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
+ ldap.add_domain = lambda temporary, skip_test: {"id": "ANSIBLE_TMP_DOMAIN"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to authenticate bind credentials!"):
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"id": "test2", "result": {"authenticationTestResult": "fail"}},
+ {"id": "ANSIBLE_TMP_DOMAIN", "result": {"authenticationTestResult": "fail"}}])):
+ ldap.are_changes_required()
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test2,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ ldap.get_domains = lambda: self.GET_DOMAINS["ldapDomains"]
+ ldap.add_domain = lambda temporary, skip_test: {"id": "ANSIBLE_TMP_DOMAIN"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to authenticate bind credentials!"):
+ with mock.patch(self.REQ_FUNC, return_value=(200, [{"id": "test2", "result": {"authenticationTestResult": "ok"}},
+ {"id": "ANSIBLE_TMP_DOMAIN", "result": {"authenticationTestResult": "fail"}}])):
+ ldap.are_changes_required()
+
+ def test_add_domain_pass(self):
+ """Verify add_domain returns expected data."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ with mock.patch(self.REQ_FUNC, return_value=(200, {"ldapDomains": [{"id": "test2"}]})):
+ self.assertEquals(ldap.add_domain(), {"id": "test2"})
+
+ def test_add_domain_fail(self):
+ """Verify add_domain returns expected data."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create LDAP domain."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ ldap.add_domain()
+
+ def test_update_domain_pass(self):
+ """Verify update_domain returns expected data."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ ldap.domain = {"id": "test2"}
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ ldap.update_domain()
+
+ def test_update_domain_fail(self):
+ """Verify update_domain returns expected data."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body()
+ ldap.domain = {"id": "test2"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update LDAP domain."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ ldap.update_domain()
+
+ def test_delete_domain_pass(self):
+ """Verify delete_domain returns expected data."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ ldap.delete_domain("test2")
+
+ def test_delete_domain_fail(self):
+ """Verify delete_domain returns expected data."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete LDAP domain."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ ldap.delete_domain("test2")
+
+ def test_disable_domains_pass(self):
+ """Verify disable_domains completes successfully."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.delete_domain = lambda x: None
+ ldap.existing_domain_ids = ["id1", "id2", "id3"]
+ ldap.disable_domains()
+
+ def test_apply_pass(self):
+ """Verify apply exits as expected."""
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body = lambda: None
+ ldap.are_changes_required = lambda: False
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes have been made to the LDAP configuration."):
+ ldap.apply()
+
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body = lambda: None
+ ldap.are_changes_required = lambda: True
+ ldap.add_domain = lambda: None
+ ldap.domain = {}
+ with self.assertRaisesRegexp(AnsibleExitJson, "LDAP domain has been added."):
+ ldap.apply()
+
+ self._set_args({"state": "present", "identifier": "test2", "server_url": "ldap://test2.example.com:389",
+ "search_base": "ou=accounts,DC=test,DC=example,DC=com",
+ "bind_user": "CN=cn,OU=accounts,DC=test2,DC=example,DC=com", "bind_password": "adminpass",
+ "role_mappings": {".*": ["storage.admin", "support.admin", "security.admin", "storage.monitor"]},
+ "names": ["test2.example.com"], "group_attributes": ["memberOf"], "user_attribute": "sAMAccountName"})
+
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body = lambda: None
+ ldap.are_changes_required = lambda: True
+ ldap.update_domain = lambda: None
+ ldap.domain = {"id": "test"}
+ with self.assertRaisesRegexp(AnsibleExitJson, "LDAP domain has been updated."):
+ ldap.apply()
+
+ self._set_args({"state": "absent", "identifier": "test2"})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body = lambda: None
+ ldap.are_changes_required = lambda: True
+ ldap.delete_domain = lambda x: None
+ ldap.domain = {"id": "test"}
+ with self.assertRaisesRegexp(AnsibleExitJson, "LDAP domain has been removed."):
+ ldap.apply()
+
+ self._set_args({"state": "disabled"})
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.10.0000.0001"}), (200, {"runningAsProxy": True})]):
+ ldap = NetAppESeriesLdap()
+ ldap.build_request_body = lambda: None
+ ldap.are_changes_required = lambda: True
+ ldap.disable_domain = lambda: None
+ ldap.domain = {"id": "test"}
+ with self.assertRaisesRegexp(AnsibleExitJson, "All LDAP domains have been removed."):
+ ldap.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_lun_mapping.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_lun_mapping.py
new file mode 100644
index 000000000..ed44e0de2
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_lun_mapping.py
@@ -0,0 +1,196 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_lun_mapping import NetAppESeriesLunMapping
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class NetAppLunMappingTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1"}
+
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_lun_mapping.NetAppESeriesLunMapping.request"
+ GRAPH_RESPONSE = {"storagePoolBundle": {"host": [{"name": "host1", "hostRef": "1"},
+ {"name": "host2", "hostRef": "2"},
+ {"name": "host3", "hostRef": "3"}],
+ "cluster": [{"name": "hostgroup1", "clusterRef": "10"},
+ {"name": "hostgroup2", "clusterRef": "20"},
+ {"name": "hostgroup3", "clusterRef": "30"}],
+ "lunMapping": [{"volumeRef": "100", "mapRef": "1", "lunMappingRef": "100001", "lun": 5},
+ {"volumeRef": "200", "mapRef": "2", "lunMappingRef": "200001", "lun": 3},
+ {"volumeRef": "1000", "mapRef": "10", "lunMappingRef": "300001", "lun": 6},
+ {"volumeRef": "2000", "mapRef": "20", "lunMappingRef": "400001", "lun": 4}]},
+ "volume": [{"name": "volume1", "volumeRef": "100", "listOfMappings": [{"lun": 5}]},
+ {"name": "volume2", "volumeRef": "200", "listOfMappings": [{"lun": 3}]},
+ {"name": "volume3", "volumeRef": "300", "listOfMappings": []}],
+ "highLevelVolBundle": {"thinVolume": [{"name": "thin_volume1", "volumeRef": "1000", "listOfMappings": [{"lun": 6}]},
+ {"name": "thin_volume2", "volumeRef": "2000", "listOfMappings": [{"lun": 4}]},
+ {"name": "thin_volume3", "volumeRef": "3000", "listOfMappings": []}]},
+ "sa": {"accessVolume": {"name": "access_volume", "accessVolumeRef": "10000"}}}
+ MAPPING_INFO = {"lun_mapping": [{"volume_reference": "100", "map_reference": "1", "lun_mapping_reference": "100001", "lun": 5},
+ {"volume_reference": "200", "map_reference": "2", "lun_mapping_reference": "200001", "lun": 3},
+ {"volume_reference": "1000", "map_reference": "10", "lun_mapping_reference": "300001", "lun": 6},
+ {"volume_reference": "2000", "map_reference": "20", "lun_mapping_reference": "400001", "lun": 4}],
+ "volume_by_reference": {"100": "volume1", "200": "volume2", "300": "volume3", "1000": "thin_volume1", "2000": "thin_volume2",
+ "3000": "thin_volume3", "10000": "access_volume"},
+ "volume_by_name": {"volume1": "100", "volume2": "200", "volume3": "300", "thin_volume1": "1000", "thin_volume2": "2000",
+ "thin_volume3": "3000", "access_volume": "10000"},
+ "lun_by_name": {"volume1": 5, "volume2": 3, "thin_volume1": 6, "thin_volume2": 4},
+ "target_by_reference": {"1": "host1", "2": "host2", "3": "host3", "10": "hostgroup1", "20": "hostgroup2", "30": "hostgroup3",
+ "0000000000000000000000000000000000000000": "DEFAULT_HOSTGROUP"},
+ "target_by_name": {"host1": "1", "host2": "2", "host3": "3", "hostgroup1": "10", "hostgroup2": "20", "hostgroup3": "30",
+ "DEFAULT_HOSTGROUP": "0000000000000000000000000000000000000000"},
+ "target_type_by_name": {"host1": "host", "host2": "host", "host3": "host", "hostgroup1": "group", "hostgroup2": "group",
+ "hostgroup3": "group", "DEFAULT_HOSTGROUP": "group"}}
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_update_mapping_info_pass(self):
+ """Verify update_mapping_info method creates the correct data structure."""
+ options = {"target": "host1", "volume": "volume1"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.GRAPH_RESPONSE)):
+ mapping.update_mapping_info()
+ print("%s" % mapping.mapping_info)
+ self.assertEquals(mapping.mapping_info, self.MAPPING_INFO)
+
+ def test_update_mapping_info_fail(self):
+ """Verify update_mapping_info throws the expected exceptions."""
+ response = {"storagePoolBundle": {"host": [{"name": "host1", "hostRef": "1"},
+ {"name": "host2", "hostRef": "2"},
+ {"name": "host3", "hostRef": "3"}],
+ "cluster": [{"name": "host1", "clusterRef": "10"},
+ {"name": "hostgroup2", "clusterRef": "20"},
+ {"name": "hostgroup3", "clusterRef": "30"}]}}
+ options = {"target": "host1", "volume": "volume1"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ with mock.patch(self.REQ_FUNC, return_value=(200, response)):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Ambiguous target type: target name is used for both host and group targets!"):
+ mapping.update_mapping_info()
+
+ def test_get_lun_mapping_pass(self):
+ """Verify get_lun_mapping method creates the correct data structure."""
+ options = {"target": "host1", "volume": "volume1"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ self.assertEquals(mapping.get_lun_mapping(), (True, "100001", 5))
+
+ options = {"target": "host1", "volume": "volume1", "lun": 5}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ self.assertEquals(mapping.get_lun_mapping(), (True, "100001", 5))
+
+ options = {"target": "host1", "volume": "volume3", "lun": 10}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ self.assertEquals(mapping.get_lun_mapping(), (False, None, None))
+
+ def test_get_lun_mapping_fail(self):
+ """Verify get_lun_mapping throws the expected exceptions."""
+ options = {"target": "host1", "volume": "volume3", "lun": 5}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with self.assertRaisesRegexp(AnsibleFailJson, "Option lun value is already in use for target!"):
+ mapping.get_lun_mapping()
+
+ options = {"target": "host10", "volume": "volume3"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with self.assertRaisesRegexp(AnsibleFailJson, "Target does not exist."):
+ mapping.get_lun_mapping()
+
+ options = {"target": "host1", "volume": "volume10"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with self.assertRaisesRegexp(AnsibleFailJson, "Volume does not exist."):
+ mapping.get_lun_mapping()
+
+ def test_update_pass(self):
+ """Verify update method creates the correct data structure."""
+ options = {"target": "host1", "volume": "volume1"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ with self.assertRaises(AnsibleExitJson):
+ mapping.update()
+
+ options = {"target": "host1", "volume": "volume1", "lun": 5}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ with self.assertRaises(AnsibleExitJson):
+ mapping.update()
+
+ options = {"target": "host1", "volume": "volume3", "lun": 10}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ with self.assertRaises(AnsibleExitJson):
+ mapping.update()
+
+ options = {"target": "host1", "volume": "volume1", "lun": 10}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update storage array lun mapping."):
+ mapping.update()
+
+ def test_update_fail(self):
+ """Verify update throws the expected exceptions."""
+ options = {"target": "host3", "volume": "volume3"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update storage array lun mapping."):
+ mapping.update()
+
+ options = {"state": "absent", "target": "host1", "volume": "volume1"}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update storage array lun mapping."):
+ mapping.update()
+
+ options = {"target": "host3", "volume": "volume3", "lun": 15}
+ self._set_args(options)
+ mapping = NetAppESeriesLunMapping()
+ mapping.update_mapping_info = lambda: None
+ mapping.mapping_info = self.MAPPING_INFO
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update storage array lun mapping."):
+ mapping.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_mgmt_interface.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_mgmt_interface.py
new file mode 100644
index 000000000..7c35d40dd
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_mgmt_interface.py
@@ -0,0 +1,513 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_mgmt_interface import NetAppESeriesMgmtInterface
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class MgmtInterfaceTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ 'api_username': 'rw',
+ 'api_password': 'password',
+ 'api_url': 'http://localhost',
+ 'ssid': '1',
+ }
+
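+ # Sample management-interface records for two controllers (slots 1 and 2), each with two
+ # ports (wan0/wan1); used below as the mocked ethernet-interfaces response.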
+ TEST_DATA = [
+ {"controllerRef": "070000000000000000000001",
+ "controllerSlot": 1,
+ "interfaceName": "wan0",
+ "interfaceRef": "2800070000000000000000000001000000000000",
+ "channel": 1,
+ "alias": "creG1g-AP-a",
+ "ipv4Enabled": True,
+ "ipv4Address": "10.1.1.10",
+ "linkStatus": "up",
+ "ipv4SubnetMask": "255.255.255.0",
+ "ipv4AddressConfigMethod": "configStatic",
+ "ipv4GatewayAddress": "10.1.1.1",
+ "ipv6Enabled": False,
+ "physicalLocation": {"slot": 0},
+ "dnsProperties": {"acquisitionProperties": {"dnsAcquisitionType": "stat",
+ "dnsServers": [{"addressType": "ipv4",
+ "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4",
+ "ipv4Address": "10.10.0.20"}]},
+ "dhcpAcquiredDnsServers": []},
+ "ntpProperties": {"acquisitionProperties": {"ntpAcquisitionType": "disabled",
+ "ntpServers": None},
+ "dhcpAcquiredNtpServers": []}},
+ {"controllerRef": "070000000000000000000001",
+ "controllerSlot": 1,
+ "interfaceName": "wan1",
+ "interfaceRef": "2800070000000000000000000001000000000000",
+ "channel": 2,
+ "alias": "creG1g-AP-a",
+ "ipv4Enabled": True,
+ "linkStatus": "down",
+ "ipv4Address": "0.0.0.0",
+ "ipv4SubnetMask": "0.0.0.0",
+ "ipv4AddressConfigMethod": "configDhcp",
+ "ipv4GatewayAddress": "10.1.1.1",
+ "ipv6Enabled": False,
+ "physicalLocation": {"slot": 1},
+ "dnsProperties": {"acquisitionProperties": {"dnsAcquisitionType": "stat",
+ "dnsServers": [{"addressType": "ipv4",
+ "ipv4Address": "10.1.0.250",
+ "ipv6Address": None},
+ {"addressType": "ipv4",
+ "ipv4Address": "10.10.0.20",
+ "ipv6Address": None}]},
+ "dhcpAcquiredDnsServers": []},
+ "ntpProperties": {"acquisitionProperties": {"ntpAcquisitionType": "disabled",
+ "ntpServers": None},
+ "dhcpAcquiredNtpServers": []}},
+ {"controllerRef": "070000000000000000000002",
+ "controllerSlot": 2,
+ "interfaceName": "wan0",
+ "interfaceRef": "2800070000000000000000000001000000000000",
+ "channel": 1,
+ "alias": "creG1g-AP-b",
+ "ipv4Enabled": True,
+ "ipv4Address": "0.0.0.0",
+ "linkStatus": "down",
+ "ipv4SubnetMask": "0.0.0.0",
+ "ipv4AddressConfigMethod": "configDhcp",
+ "ipv4GatewayAddress": "10.1.1.1",
+ "ipv6Enabled": False,
+ "physicalLocation": {"slot": 0},
+ "dnsProperties": {"acquisitionProperties": {"dnsAcquisitionType": "stat",
+ "dnsServers": [{"addressType": "ipv4",
+ "ipv4Address": "10.1.0.250",
+ "ipv6Address": None}]},
+ "dhcpAcquiredDnsServers": []},
+ "ntpProperties": {"acquisitionProperties": {"ntpAcquisitionType": "stat",
+ "ntpServers": [{"addrType": "ipvx",
+ "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.13.1.5",
+ "ipv6Address": None}},
+ {"addrType": "ipvx",
+ "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.15.1.8",
+ "ipv6Address": None}}]},
+ "dhcpAcquiredNtpServers": []}},
+ {"controllerRef": "070000000000000000000002",
+ "controllerSlot": 2,
+ "interfaceName": "wan1",
+ "interfaceRef": "2801070000000000000000000001000000000000",
+ "channel": 2,
+ "alias": "creG1g-AP-b",
+ "ipv4Enabled": True,
+ "ipv4Address": "0.0.0.0",
+ "linkStatus": "down",
+ "ipv4SubnetMask": "0.0.0.0",
+ "ipv4AddressConfigMethod": "configDhcp",
+ "ipv4GatewayAddress": "10.1.1.1",
+ "ipv6Enabled": False,
+ "physicalLocation": {"slot": 1},
+ "dnsProperties": {"acquisitionProperties": {"dnsAcquisitionType": "stat",
+ "dnsServers": [{"addressType": "ipv4",
+ "ipv4Address": "10.19.1.2",
+ "ipv6Address": None}]},
+ "dhcpAcquiredDnsServers": []},
+ "ntpProperties": {"acquisitionProperties": {"ntpAcquisitionType": "stat",
+ "ntpServers": [{"addrType": "ipvx",
+ "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.13.1.5",
+ "ipv6Address": None}},
+ {"addrType": "ipvx",
+ "domainName": None,
+ "ipvxAddress": {"addressType": "ipv4",
+ "ipv4Address": "10.15.1.18",
+ "ipv6Address": None}}]},
+ "dhcpAcquiredNtpServers": []}}]
+
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_mgmt_interface.NetAppESeriesMgmtInterface.request'
+ TIME_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_mgmt_interface.sleep'
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_get_controllers_pass(self):
+ """Verify get_controllers returns the expected dictionary."""
+ initial = {
+ "state": "enabled",
+ "controller": "A",
+ "port": "1",
+ "address": "192.168.1.1",
+ "subnet_mask": "255.255.255.1",
+ "config_method": "static"}
+ controller_request = [
+ {"physicalLocation": {"slot": 2},
+ "controllerRef": "070000000000000000000002",
+ "networkSettings": {"remoteAccessEnabled": True}},
+ {"physicalLocation": {"slot": 1},
+ "controllerRef": "070000000000000000000001",
+ "networkSettings": {"remoteAccessEnabled": False}}]
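+ # Controller slot 1 is expected under key "A" and slot 2 under "B", even though the
+ # mocked endpoint lists the slot-2 controller first.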
+ expected = {
+ 'A': {'controllerRef': '070000000000000000000001',
+ 'controllerSlot': 1, 'ssh': False},
+ 'B': {'controllerRef': '070000000000000000000002',
+ 'controllerSlot': 2, 'ssh': True}}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, controller_request)):
+ response = mgmt_interface.get_controllers()
+ self.assertEqual(response, expected)
+
+ def test_controller_property_fail(self):
+ """Verify controllers endpoint request failure causes AnsibleFailJson exception."""
+ initial = {
+ "state": "enabled",
+ "controller": "A",
+ "port": "1",
+ "address": "192.168.1.1",
+ "subnet_mask": "255.255.255.1",
+ "config_method": "static"}
+ controller_request = [
+ {"physicalLocation": {"slot": 2},
+ "controllerRef": "070000000000000000000002",
+ "networkSettings": {"remoteAccessEnabled": True}},
+ {"physicalLocation": {"slot": 1},
+ "controllerRef": "070000000000000000000001",
+ "networkSettings": {"remoteAccessEnabled": False}}]
+ expected = {
+ 'A': {'controllerRef': '070000000000000000000001',
+ 'controllerSlot': 1, 'ssh': False},
+ 'B': {'controllerRef': '070000000000000000000002',
+ 'controllerSlot': 2, 'ssh': True}}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the controller settings."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ response = mgmt_interface.get_controllers()
+
+ def test_update_target_interface_info_pass(self):
+ """Verify update_target_interface_info populates interface_info with the expected values."""
+ initial = {
+ "state": "enabled",
+ "controller": "A",
+ "port": "1",
+ "address": "192.168.1.1",
+ "subnet_mask": "255.255.255.0",
+ "config_method": "static"}
+ get_controller = {"A": {"controllerSlot": 1, "controllerRef": "070000000000000000000001", "ssh": False},
+ "B": {"controllerSlot": 2, "controllerRef": "070000000000000000000002", "ssh": True}}
+ expected = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1", "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"}, {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic", "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.get_controllers = lambda: get_controller
+
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.TEST_DATA)):
+ mgmt_interface.update_target_interface_info()
+ self.assertEquals(mgmt_interface.interface_info, expected)
+
+ def test_interface_property_request_exception_fail(self):
+ """Verify ethernet-interfaces endpoint request failure results in AnsibleFailJson exception."""
+ initial = {
+ "state": "enabled",
+ "controller": "A",
+ "port": "1",
+ "address": "192.168.1.1",
+ "subnet_mask": "255.255.255.0",
+ "config_method": "static"}
+ get_controller = {"A": {"controllerSlot": 1, "controllerRef": "070000000000000000000001", "ssh": False},
+ "B": {"controllerSlot": 2, "controllerRef": "070000000000000000000002", "ssh": True}}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.get_controllers = lambda: get_controller
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve defined management interfaces."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ mgmt_interface.update_target_interface_info()
+
+ def test_update_target_interface_info_fail(self):
+ """Verify update_target_interface_info throws the expected exception for an invalid port."""
+ initial = {
+ "state": "enabled",
+ "controller": "A",
+ "port": "3",
+ "address": "192.168.1.1",
+ "subnet_mask": "255.255.255.1",
+ "config_method": "static"}
+ get_controller = {"A": {"controllerSlot": 1, "controllerRef": "070000000000000000000001", "ssh": False},
+ "B": {"controllerSlot": 2, "controllerRef": "070000000000000000000002", "ssh": True}}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.get_controllers = lambda: get_controller
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid port number! Controller .*? ports:"):
+ with mock.patch(self.REQ_FUNC, return_value=(200, self.TEST_DATA)):
+ mgmt_interface.update_target_interface_info()
+
+ def test_update_body_enable_interface_setting_pass(self):
+ """Validate update_body_enable_interface_setting updates properly."""
+ initial = {"state": "enabled", "controller": "A", "port": "1", "address": "192.168.1.1", "subnet_mask": "255.255.255.1", "config_method": "static"}
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": True, "id": "2800070000000000000000000001000000000000", "ssh": False}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ change_required = mgmt_interface.update_body_enable_interface_setting()
+ self.assertFalse(change_required)
+ self.assertTrue("ipv4Enabled" in mgmt_interface.body and mgmt_interface.body["ipv4Enabled"])
+
+ initial = {"state": "disabled", "controller": "A", "port": "1", "address": "192.168.1.1", "subnet_mask": "255.255.255.1", "config_method": "static"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ change_required = mgmt_interface.update_body_enable_interface_setting()
+ self.assertTrue(change_required)
+ self.assertTrue("ipv4Enabled" in mgmt_interface.body and not mgmt_interface.body["ipv4Enabled"])
+
+ def test_update_body_enable_interface_setting_fail(self):
+ """Validate update_body_enable_interface_setting throws expected exception"""
+ initial = {"state": "disabled", "controller": "A", "port": "1", "address": "192.168.1.1", "subnet_mask": "255.255.255.1", "config_method": "static"}
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ with self.assertRaisesRegexp(AnsibleFailJson, "Either IPv4 or IPv6 must be enabled."):
+ mgmt_interface.update_body_enable_interface_setting()
+
+ def test_update_body_interface_settings_pass(self):
+ """Validate update_body_interface_settings builds the expected request body."""
+ initial = {"state": "enabled", "controller": "A", "port": "1", "address": "192.168.1.1", "subnet_mask": "255.255.255.1", "config_method": "static"}
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_interface_settings())
+ self.assertEquals(mgmt_interface.body, {"ipv4AddressConfigMethod": "configStatic", "ipv4Address": "192.168.1.1", "ipv4SubnetMask": "255.255.255.1"})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "address": "192.168.1.100", "subnet_mask": "255.255.255.1", "gateway": "192.168.1.1",
+ "config_method": "static"}
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_interface_settings())
+ self.assertEquals(mgmt_interface.body, {"ipv4AddressConfigMethod": "configStatic", "ipv4Address": "192.168.1.100", "ipv4SubnetMask": "255.255.255.1",
+ "ipv4GatewayAddress": "192.168.1.1"})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp"}
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_interface_settings())
+ self.assertEquals(mgmt_interface.body, {"ipv4AddressConfigMethod": "configDhcp"})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp"}
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configDhcp",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertFalse(mgmt_interface.update_body_interface_settings())
+ self.assertEquals(mgmt_interface.body, {"ipv4AddressConfigMethod": "configDhcp"})
+
+ def test_update_body_dns_server_settings_pass(self):
+ """Validate update_body_dns_server_settings builds the expected request body."""
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "dns_config_method": "dhcp"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_dns_server_settings())
+ self.assertEquals(mgmt_interface.body, {"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "dhcp"}})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "dns_config_method": "static", "dns_address": "192.168.1.100"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_dns_server_settings())
+ self.assertEquals(mgmt_interface.body, {"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "stat",
+ "dnsServers": [{"addressType": "ipv4", "ipv4Address": "192.168.1.100"}]}})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "dns_config_method": "static", "dns_address": "192.168.1.100",
+ "dns_address_backup": "192.168.1.102"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_dns_server_settings())
+ self.assertEquals(mgmt_interface.body, {"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "stat",
+ "dnsServers": [{"addressType": "ipv4", "ipv4Address": "192.168.1.100"},
+ {"addressType": "ipv4", "ipv4Address": "192.168.1.102"}]}})
+
+ def test_update_body_ntp_server_settings_pass(self):
+ """Validate update_body_ntp_server_settings builds the expected request body."""
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "dhcp", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "ntp_config_method": "disabled"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_ntp_server_settings())
+ self.assertEquals(mgmt_interface.body, {"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "disabled"}})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "ntp_config_method": "dhcp"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertFalse(mgmt_interface.update_body_ntp_server_settings())
+ self.assertEquals(mgmt_interface.body, {"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "dhcp"}})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "ntp_config_method": "static", "ntp_address": "192.168.1.200"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_ntp_server_settings())
+ self.assertEquals(mgmt_interface.body, {"ntpAcquisitionDescriptor": {
+ "ntpAcquisitionType": "stat", "ntpServers": [{"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "192.168.1.200"}}]}})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "ntp_config_method": "static", "ntp_address": "192.168.1.200",
+ "ntp_address_backup": "192.168.1.202"}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_ntp_server_settings())
+ self.assertEquals(mgmt_interface.body, {"ntpAcquisitionDescriptor": {
+ "ntpAcquisitionType": "stat", "ntpServers": [{"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "192.168.1.200"}},
+ {"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": "192.168.1.202"}}]}})
+
+ def test_update_body_ssh_setting_pass(self):
+ """Validate update_body_ssh_setting builds the expected request body."""
+ interface_info = {"channel": 1, "link_status": "up", "enabled": True, "address": "10.1.1.10", "gateway": "10.1.1.1",
+ "subnet_mask": "255.255.255.0",
+ "dns_config_method": "stat",
+ "dns_servers": [{"addressType": "ipv4", "ipv4Address": "10.1.0.250"},
+ {"addressType": "ipv4", "ipv4Address": "10.10.0.20"}],
+ "ntp_config_method": "disabled", "ntp_servers": None, "config_method": "configStatic",
+ "controllerRef": "070000000000000000000001",
+ "controllerSlot": 1, "ipv6_enabled": False, "id": "2800070000000000000000000001000000000000", "ssh": False}
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp", "ssh": True}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertTrue(mgmt_interface.update_body_ssh_setting())
+ self.assertEquals(mgmt_interface.body, {"enableRemoteAccess": True})
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp", "ssh": False}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.interface_info = interface_info
+ self.assertFalse(mgmt_interface.update_body_ssh_setting())
+ self.assertEquals(mgmt_interface.body, {"enableRemoteAccess": False})
+
+ def test_update_url_pass(self):
+ """Verify update_url switches the request url to the alternate controller address."""
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp", "ssh": False}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.url = "https://192.168.1.100:8443/devmgr/v2/"
+ mgmt_interface.alt_interface_addresses = ["192.168.1.102"]
+ mgmt_interface.update_url()
+ self.assertEqual(mgmt_interface.url, "https://192.168.1.102:8443/devmgr/v2/")
+
+ def test_update_pass(self):
+ """Verify update successfully completes."""
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp", "ssh": False}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.update_request_body = lambda: False
+ mgmt_interface.is_embedded = lambda: False
+ mgmt_interface.use_alternate_address = False
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes are required."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ mgmt_interface.update()
+
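+ # Stand-in for update_request_body: it toggles between True and False on successive calls,
+ # so the first check reports a needed change and the follow-up check reports none.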
+ def update_request_body():
+ update_request_body.value = not update_request_body.value
+ return update_request_body.value
+ update_request_body.value = False
+
+ initial = {"state": "enabled", "controller": "A", "port": "1", "config_method": "dhcp", "ssh": False}
+ self._set_args(initial)
+ mgmt_interface = NetAppESeriesMgmtInterface()
+ mgmt_interface.update_request_body = update_request_body
+ mgmt_interface.is_embedded = lambda: True
+ mgmt_interface.use_alternate_address = False
+ with self.assertRaisesRegexp(AnsibleExitJson, "The interface settings have been updated."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ mgmt_interface.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_nvme_interface.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_nvme_interface.py
new file mode 100644
index 000000000..aee149f0a
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_nvme_interface.py
@@ -0,0 +1,220 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_nvme_interface import NetAppESeriesNvmeInterface
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class NvmeInterfaceTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ "ssid": "1",
+ "state": "enabled",
+ "controller": "A",
+ "channel": 1}
+
+ REQ_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_nvme_interface.NetAppESeriesNvmeInterface.request"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_valid_options_pass(self):
+ """Verify valid options."""
+ valid_option_list = [{"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "255.255.255.0",
+ "gateway": "192.168.1.1", "mtu": 1500},
+ {"address": "192.168.1.100"},
+ {"state": "enabled", "config_method": "dhcp", "mtu": 1500},
+ {"state": "disabled"}]
+
+ for option in valid_option_list:
+ self._set_args(option)
+ nvme = NetAppESeriesNvmeInterface()
+
+ def test_invalid_options_fail(self):
+ """Verify invalid options throw expected exceptions."""
+ invalid_option_list = [{"state": "enabled", "config_method": "static", "address": "1920.168.1.100", "subnet_mask": "255.255.255.0",
+ "gateway": "192.168.1.1", "mtu": 1500},
+ {"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "255.2550.255.0",
+ "gateway": "192.168.1.1", "mtu": 1500},
+ {"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "255.255.255.0",
+ "gateway": "192.168..100", "mtu": 1500},
+ {"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "2550.255.255.0",
+ "gateway": "192.168.1.1000", "mtu": 1500}]
+
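+ # Each entry contains a malformed IPv4 address, subnet mask, or gateway, so the module
+ # constructor is expected to reject it.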
+ for option in invalid_option_list:
+ self._set_args(option)
+ with self.assertRaises(AnsibleFailJson):
+ nvme = NetAppESeriesNvmeInterface()
+
+ def test_get_nvmeof_interfaces_pass(self):
+ """Verify get_nvmeof_interfaces method returns the expected list of interface values."""
+ options = {"address": "192.168.1.100"}
+ response = [{"controllerRef": "070000000000000000000001", "interfaceRef": "2201020000000000000000000000000000000000",
+ "ioInterfaceTypeData": {"interfaceType": "ib",
+ "ib": {"interfaceRef": "2201020000000000000000000000000000000000", "channel": 1, "linkState": "up"}},
+ "commandProtocolPropertiesList": {"commandProtocolProperties": [
+ {"commandProtocol": "nvme", "nvmeProperties": {"commandSet": "nvmeof", "nvmeofProperties": {
+ "provider": "providerInfiniband", "ibProperties": {"ipAddressData": {
+ "addressType": "ipv4", "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}}}}]}}]
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ with mock.patch(self.REQ_FUNC, return_value=(200, response)):
+ self.assertEquals(nvme.get_nvmeof_interfaces(), [
+ {"properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 1, "interface_type": "ib",
+ "interface": {"interfaceRef": "2201020000000000000000000000000000000000", "channel": 1,
+ "linkState": "up"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"}])
+
+ def test_get_nvmeof_interfaces_fail(self):
+ """Verify get_nvmeof_interfaces method throws the expected exceptions."""
+ options = {"address": "192.168.1.100"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve defined host interfaces."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ nvme.get_nvmeof_interfaces()
+
+ def test_get_target_interface_pass(self):
+ """Verify get_target_interface returns the expected interface."""
+ # options = {"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "255.255.255.0",
+ # "gateway": "192.168.1.1", "mtu": 1500}
+ options = {"address": "192.168.1.200"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.get_nvmeof_interfaces = lambda: [
+ {"properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5,
+ "interface_type": {"interfaceRef": "2201020000000000000000000000000000000000", "channel": 5,
+ "linkState": "up"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"},
+ {"properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.2.100"}}}},
+ "reference": "2201030000000000000000000000000000000000", "channel": 4,
+ "interface_type": {"interfaceRef": "2201030000000000000000000000000000000000", "channel": 4,
+ "linkState": "up"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"},
+ {"properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.3.100"}}}},
+ "reference": "2201040000000000000000000000000000000000", "channel": 6,
+ "interface_type": {"interfaceRef": "2201040000000000000000000000000000000000", "channel": 6,
+ "linkState": "down"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"}]
+ nvme.get_controllers = lambda: {"A": "070000000000000000000001", "B": "070000000000000000000002"}
+ self.assertEqual(nvme.get_target_interface(), {
+ "properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.2.100"}}}},
+ "reference": "2201030000000000000000000000000000000000", "channel": 4,
+ "interface_type": {"interfaceRef": "2201030000000000000000000000000000000000", "channel": 4,
+ "linkState": "up"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"})
+
+ def test_get_target_interface_fail(self):
+ """Verify get_target_interface method throws the expected exceptions."""
+ options = {"address": "192.168.1.200", "channel": "0"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.get_nvmeof_interfaces = lambda: [
+ {"properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5,
+ "interface_type": {"interfaceRef": "2201020000000000000000000000000000000000", "channel": 5,
+ "linkState": "up"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"}]
+ nvme.get_controllers = lambda: {"A": "070000000000000000000001", "B": "070000000000000000000002"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid controller .*? NVMe channel."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ nvme.get_target_interface()
+
+ options = {"address": "192.168.1.200", "channel": "2"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.get_nvmeof_interfaces = lambda: [
+ {"properties": {"provider": "providerInfiniband", "ibProperties": {
+ "ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5,
+ "interface_type": {"interfaceRef": "2201020000000000000000000000000000000000", "channel": 5,
+ "linkState": "up"}, "controller_id": "070000000000000000000001",
+ "link_status": "up"}]
+ nvme.get_controllers = lambda: {"A": "070000000000000000000001", "B": "070000000000000000000002"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid controller .*? NVMe channel."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ nvme.get_target_interface()
+
+ def test_update_pass(self):
+ """Verify update successfully completes"""
+ # options = {"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "255.255.255.0",
+ # "gateway": "192.168.1.1", "mtu": 1500}
+ options = {"address": "192.168.1.200"}
+ iface = {"properties": {"provider": "providerInfiniband",
+ "ibProperties": {"ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5, "interface_type": "ib", "controllerRef": "070000000000000000000001",
+ "link_status": "up"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.get_target_interface = lambda: iface
+ with self.assertRaisesRegexp(AnsibleExitJson, "NVMeoF interface settings have been updated."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ nvme.update()
+
+ options = {"address": "192.168.1.200"}
+ iface = {"properties": {"provider": "providerInfiniband",
+ "ibProperties": {"ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5, "interface_type": "ib", "controllerRef": "070000000000000000000001",
+ "link_status": "up"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.module.check_mode = True
+ nvme.get_target_interface = lambda: iface
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes have been made."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ nvme.update()
+
+ options = {"address": "192.168.1.100"}
+ iface = {"properties": {"provider": "providerInfiniband",
+ "ibProperties": {"ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5, "interface_type": "ib", "controllerRef": "070000000000000000000001",
+ "link_status": "up"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.get_target_interface = lambda: iface
+
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes have been made."):
+ with mock.patch(self.REQ_FUNC, return_value=(200, None)):
+ nvme.update()
+
+ def test_update_fail(self):
+ """Verify update throws expected exception."""
+ # options = {"state": "enabled", "config_method": "static", "address": "192.168.1.100", "subnet_mask": "255.255.255.0",
+ # "gateway": "192.168.1.1", "mtu": 1500}
+ options = {"address": "192.168.1.200"}
+ iface = {"properties": {"provider": "providerInfiniband",
+ "ibProperties": {"ipAddressData": {"addressType": "ipv4",
+ "ipv4Data": {"configState": "configured", "ipv4Address": "192.168.1.100"}}}},
+ "reference": "2201020000000000000000000000000000000000", "channel": 5, "interface_type": "ib", "controllerRef": "070000000000000000000001",
+ "link_status": "up"}
+ self._set_args(options)
+ nvme = NetAppESeriesNvmeInterface()
+ nvme.get_target_interface = lambda: iface
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to configure interface."):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ nvme.update()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_drive_firmware_upload.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_drive_firmware_upload.py
new file mode 100644
index 000000000..a527b2917
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_drive_firmware_upload.py
@@ -0,0 +1,137 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_drive_firmware_upload import NetAppESeriesProxyDriveFirmwareUpload
+from units.compat.mock import patch, mock_open
+
+
+class ProxyDriveFirmwareUploadTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "http://localhost/devmgr/v2",
+ "validate_certs": "no"}
+
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_drive_firmware_upload." \
+ "NetAppESeriesProxyDriveFirmwareUpload.request"
+ CREATE_MULTIPART_FORMDATA_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules." \
+ "na_santricity_proxy_drive_firmware_upload.create_multipart_formdata"
+ OS_PATH_EXISTS_FUNC = "os.path.exists"
+ OS_PATH_ISDIR_FUNC = "os.path.isdir"
+ OS_LISTDIR_FUNC = "os.listdir"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_determine_file_paths_pass(self):
+ """Ensure determine_file_paths method succeeds when all files exist."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+
+ with patch(self.OS_PATH_EXISTS_FUNC, return_value=True):
+ with patch(self.OS_PATH_ISDIR_FUNC, side_effect=[False, True]):
+ with patch(self.OS_LISTDIR_FUNC, return_value=["firmware2.dlp", "firmware3.dlp"]):
+ firmware.determine_file_paths()
+ self.assertEqual(firmware.files, {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"})
+
+ def test_determine_file_paths_fail(self):
+ """Ensure determine_file_paths method throws expected exception."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Drive firmware file does not exist!"):
+ with patch(self.OS_PATH_EXISTS_FUNC, side_effect=[True, False]):
+ firmware.determine_file_paths()
+
+ """Verify determine_changes returns the expected results."""
+ """Determine whether determine_changes returns expected results."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+ firmware.files = {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"}
+
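+ # The mocked listing already contains firmware1 and firmware3 plus an extra firmware4,
+ # so firmware2 should be queued for upload and firmware4 for removal.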
+ with patch(self.REQUEST_FUNC, return_value=(200, [{"fileName": "firmware1.dlp"}, {"fileName": "firmware3.dlp"}, {"fileName": "firmware4.dlp"}])):
+ firmware.determine_changes()
+
+ self.assertEqual(firmware.add_files, ["firmware2.dlp"])
+ self.assertEqual(firmware.remove_files, ["firmware4.dlp"])
+
+ def test_determine_changes_fail(self):
+ """Ensure determine_changes throws the expected exception when the firmware listing request fails."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve proxy drive firmware file list."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.determine_changes()
+
+ def test_upload_files_pass(self):
+ """Ensure upload_files method successfully passes."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+ firmware.files = {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"}
+ firmware.add_files = ["firmware1.dlp", "firmware2.dlp"]
+
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=(None, None)):
+ with patch(self.REQUEST_FUNC, return_value=(200, None)):
+ firmware.upload_files()
+
+ def test_delete_files_pass(self):
+ """Ensure delete_files completes as expected."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+ firmware.remove_files = ["firmware1.dlp", "firmware2.dlp"]
+
+ with patch(self.REQUEST_FUNC, return_value=(204, None)):
+ firmware.delete_files()
+
+ def test_apply_pass(self):
+ """Ensure that the apply method behaves as expected."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+ firmware.files = {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"}
+ firmware.module.check_mode = True
+ firmware.is_proxy = lambda: True
+ firmware.determine_file_paths = lambda: None
+ firmware.determine_changes = lambda: None
+
+ firmware.add_files = ["firmware1.dlp", "firmware2.dlp"]
+ firmware.remove_files = ["firmware3.dlp", "firmware4.dlp"]
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ firmware.apply()
+
+ firmware.add_files = ["firmware1.dlp", "firmware2.dlp"]
+ firmware.remove_files = []
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ firmware.apply()
+
+ firmware.add_files = []
+ firmware.remove_files = ["firmware3.dlp", "firmware4.dlp"]
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ firmware.apply()
+
+ firmware.add_files = []
+ firmware.remove_files = []
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': False"):
+ firmware.apply()
+
+ def test_apply_fail(self):
+ """Ensure that the apply method fails when not executing against the proxy."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyDriveFirmwareUpload()
+ firmware.is_proxy = lambda: False
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Module can only be executed against SANtricity Web Services Proxy."):
+ firmware.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_firmware_upload.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_firmware_upload.py
new file mode 100644
index 000000000..72ccd6711
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_firmware_upload.py
@@ -0,0 +1,136 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_firmware_upload import NetAppESeriesProxyFirmwareUpload
+from units.compat.mock import patch, mock_open
+
+
+class ProxyFirmwareUploadTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "http://localhost/devmgr/v2",
+ "validate_certs": "no"}
+
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_firmware_upload.NetAppESeriesProxyFirmwareUpload.request"
+ CREATE_MULTIPART_FORMDATA_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules." \
+ "na_santricity_proxy_firmware_upload.create_multipart_formdata"
+ OS_PATH_EXISTS_FUNC = "os.path.exists"
+ OS_PATH_ISDIR_FUNC = "os.path.isdir"
+ OS_LISTDIR_FUNC = "os.listdir"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_determine_file_paths_pass(self):
+ """Ensure determine_file_paths method succeeds when all files exist."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+
+ with patch(self.OS_PATH_EXISTS_FUNC, return_value=True):
+ with patch(self.OS_PATH_ISDIR_FUNC, side_effect=[False, True]):
+ with patch(self.OS_LISTDIR_FUNC, return_value=["firmware2.dlp", "firmware3.dlp"]):
+ firmware.determine_file_paths()
+ self.assertEqual(firmware.files, {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"})
+
+ def test_determine_file_paths_fail(self):
+ """Ensure determine_file_paths method throws expected exception."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Drive firmware file does not exist!"):
+ with patch(self.OS_PATH_EXISTS_FUNC, side_effect=[True, False]):
+ firmware.determine_file_paths()
+
+ """Verify determine_changes returns the expected results."""
+ """Determine whether determine_changes returns expected results."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+ firmware.files = {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"}
+
+ with patch(self.REQUEST_FUNC, return_value=(200, [{"filename": "firmware1.dlp"}, {"filename": "firmware3.dlp"}, {"filename": "firmware4.dlp"}])):
+ firmware.determine_changes()
+
+ self.assertEqual(firmware.add_files, ["firmware2.dlp"])
+ self.assertEqual(firmware.remove_files, ["firmware4.dlp"])
+
+ def test_determine_changes_fail(self):
+ """Ensure determine_changes throws the expected exception when the firmware listing request fails."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve current firmware file listing."):
+ with patch(self.REQUEST_FUNC, return_value=Exception()):
+ firmware.determine_changes()
+
+ def test_upload_files_pass(self):
+ """Ensure upload_files method successfully passes."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+ firmware.files = {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"}
+ firmware.add_files = ["firmware1.dlp", "firmware2.dlp"]
+
+ with patch(self.CREATE_MULTIPART_FORMDATA_FUNC, return_value=(None, None)):
+ with patch(self.REQUEST_FUNC, return_value=(200, None)):
+ firmware.upload_files()
+
+ def test_delete_files_pass(self):
+ """Ensure delete_files completes as expected."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+ firmware.remove_files = ["firmware1.dlp", "firmware2.dlp"]
+
+ with patch(self.REQUEST_FUNC, return_value=(204, None)):
+ firmware.delete_files()
+
+ def test_apply_pass(self):
+ """Ensure that the apply method behaves as expected."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+ firmware.files = {"firmware1.dlp": "/path/to/firmware1.dlp",
+ "firmware2.dlp": "/path/to/firmware/directory/firmware2.dlp",
+ "firmware3.dlp": "/path/to/firmware/directory/firmware3.dlp"}
+ firmware.module.check_mode = True
+ firmware.is_proxy = lambda: True
+ firmware.determine_file_paths = lambda: None
+ firmware.determine_changes = lambda: None
+
+ firmware.add_files = ["firmware1.dlp", "firmware2.dlp"]
+ firmware.remove_files = ["firmware3.dlp", "firmware4.dlp"]
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ firmware.apply()
+
+ firmware.add_files = ["firmware1.dlp", "firmware2.dlp"]
+ firmware.remove_files = []
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ firmware.apply()
+
+ firmware.add_files = []
+ firmware.remove_files = ["firmware3.dlp", "firmware4.dlp"]
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': True"):
+ firmware.apply()
+
+ firmware.add_files = []
+ firmware.remove_files = []
+ with self.assertRaisesRegexp(AnsibleExitJson, r"'changed': False"):
+ firmware.apply()
+
+ def test_apply_fail(self):
+ """Ensure that the apply method fails when not executing against the proxy."""
+ self._set_args({"firmware": ["/path/to/firmware1.dlp", "/path/to/firmware/directory"]})
+ firmware = NetAppESeriesProxyFirmwareUpload()
+ firmware.is_proxy = lambda: False
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"Module can only be executed against SANtricity Web Services Proxy."):
+ firmware.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_systems.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_systems.py
new file mode 100644
index 000000000..31e078203
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_proxy_systems.py
@@ -0,0 +1,497 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils import six
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_systems import NetAppESeriesProxySystems
+from units.compat import mock
+
+
+class ProxySystemsTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "http://localhost/devmgr/v2",
+ "validate_certs": "no"}
+
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_systems.NetAppESeriesProxySystems.request"
+ _REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_systems.NetAppESeriesProxySystems._request"
+ TIME_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_proxy_systems.sleep"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_valid_options_pass(self):
+ """Verify valid options."""
+ options_list = [{"password": "password", "systems": [{"ssid": "10", "serial": "021633035190"},
+ {"addresses": ["192.168.1.100"]},
+ {"serial": "021628016299"}]},
+ {"password": "password", "systems": ["021178889999", "022348016297", "021625436296"]},
+ {"password": "password", "systems": []}, {}]
+
+ for options in options_list:
+ self._set_args(options)
+ systems = NetAppESeriesProxySystems()
+
+ self._set_args(options_list[0])
+ systems = NetAppESeriesProxySystems()
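+ # Each supplied system is normalized: an explicit ssid/serial is kept, an address-only
+ # entry is keyed by its address, and a bare serial string becomes both ssid and serial.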
+ self.assertEquals(systems.systems, [
+ {"ssid": "10", "serial": "021633035190", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": [], "embedded_available": None, "accept_certificate": False, "current_info": {}, "changes": {},
+ "updated_required": False, "failed": False, "discovered": False},
+ {"ssid": "192.168.1.100", "serial": "", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.100"], "embedded_available": None, "accept_certificate": False, "current_info": {},
+ "changes": {}, "updated_required": False, "failed": False, "discovered": False},
+ {"ssid": "021628016299", "serial": "021628016299", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None, "meta_tags": [], "controller_addresses": [], "embedded_available": None, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": False}])
+
+ def test_invalid_options_fail(self):
+ """Verify invalid systems option throws expected exception."""
+ self._set_args({"password": "password", "systems": [[]]})
+ with self.assertRaisesRegexp(AnsibleFailJson, "Invalid system! All systems must either be a simple serial number or a dictionary."):
+ systems = NetAppESeriesProxySystems()
+
+ def test_discover_array_pass(self):
+ """Verify the discover_array method."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ response = {"discoverProcessRunning": False, "storageSystems": [{"serialNumber": "1", "ipAddresses": ["192.168.1.5", "192.168.1.6"],
+ "supportedManagementPorts": ["https", "symbol"]},
+ {"serialNumber": "2", "ipAddresses": ["192.168.1.15", "192.168.1.16"],
+ "supportedManagementPorts": ["symbol"]},
+ {"serialNumber": "3", "ipAddresses": ["192.168.1.25", "192.168.1.26"],
+ "supportedManagementPorts": ["https", "symbol"]},
+ {"serialNumber": "4", "ipAddresses": ["192.168.1.35", "192.168.1.36"],
+ "supportedManagementPorts": ["symbol"]}]}
+ systems = NetAppESeriesProxySystems()
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": "1"}), (200, {"discoverProcessRunning": True}), (200, response)]):
+ systems.discover_array()
+ self.assertEqual(systems.systems, [
+ {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}])
+
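+ # With add_discovered_systems=True, every discovered array should be added; https support marks a system embedded-capable.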
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": True})
+ response = {"discoverProcessRunning": False, "storageSystems": [{"serialNumber": "1", "ipAddresses": ["192.168.1.5", "192.168.1.6"],
+ "supportedManagementPorts": ["https", "symbol"]},
+ {"serialNumber": "2", "ipAddresses": ["192.168.1.15", "192.168.1.16"],
+ "supportedManagementPorts": ["symbol"]},
+ {"serialNumber": "3", "ipAddresses": ["192.168.1.25", "192.168.1.26"],
+ "supportedManagementPorts": ["https", "symbol"]},
+ {"serialNumber": "4", "ipAddresses": ["192.168.1.35", "192.168.1.36"],
+ "supportedManagementPorts": ["symbol"]}]}
+ systems = NetAppESeriesProxySystems()
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": "1"}), (200, {"discoverProcessRunning": True}), (200, response)]):
+ systems.discover_array()
+ self.assertEqual(systems.systems, [
+ {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "3", "serial": "3", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.25", "192.168.1.26"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "4", "serial": "4", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}])
+
+ def test_discover_array_fail(self):
+ """Verify discover_array method throws expected exceptions."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": True})
+ systems = NetAppESeriesProxySystems()
+ with self.assertRaisesRegex(AnsibleFailJson, "Failed to initiate array discovery."):
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ systems.discover_array()
+
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": True})
+ systems = NetAppESeriesProxySystems()
+ with self.assertRaisesRegex(AnsibleFailJson, "Failed to get the discovery results."):
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": "1"}), Exception()]):
+ systems.discover_array()
+
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": True})
+ systems = NetAppESeriesProxySystems()
+ with self.assertRaisesRegex(AnsibleFailJson, "Timeout waiting for array discovery process."):
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, {"requestId": "1"})] + [(200, {"discoverProcessRunning": True})] * 1000):
+ systems.discover_array()
+
+ def test_update_storage_systems_info_pass(self):
+ """Verify update_storage_systems_info method performs correctly."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.systems = [
+ {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+
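+ # The proxy currently reports systems "1" and "5": "5" should be queued for removal and the unreported systems for addition.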
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, [{"id": "1", "passwordStatus": "valid", "metaTags": []},
+ {"id": "5", "passwordStatus": "valid", "metaTags": []}])):
+ systems.update_storage_systems_info()
+ self.assertEquals(systems.systems_to_remove, ["5"])
+ self.assertEquals(systems.systems_to_add, [
+ {"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None, "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False,
+ "accept_certificate": False, "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}])
+
+ def test_update_storage_systems_info_fail(self):
+ """Verify update_storage_systems_info throws expected exceptions."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.systems = [
+ {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage systems."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ systems.update_storage_systems_info()
+
+ def test_set_password_pass(self):
+ """Verify set_password completes as expected."""
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
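+ # A 200 response should leave password_set False; a 401 should mark it True.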
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self._REQUEST_FUNC, return_value=(200, None)):
+ systems.set_password(system)
+ self.assertFalse(system["password_set"])
+
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self._REQUEST_FUNC, return_value=(401, None)):
+ systems.set_password(system)
+ self.assertTrue(system["password_set"])
+
+ def test_set_password_fail(self):
+ """Verify set_password throws expected exceptions."""
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self._REQUEST_FUNC, return_value=Exception()):
+ systems.set_password(system)
+ self.assertTrue(system["failed"])
+
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self._REQUEST_FUNC, side_effect=[(200, None), Exception(), Exception(), Exception()]):
+ systems.set_password(system)
+ self.assertTrue(system["failed"])
+
+ def test_update_system_changes_pass(self):
+ """Verify system changes."""
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.update_system_changes(system)
+ self.assertEquals(system["changes"], {})
+
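+ # Management paths differing from the requested controller addresses should yield a controllerAddresses change.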
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {"managementPaths": ["192.168.1.25", "192.168.1.6"], "metaTags": [],
+ "controllers": [{"certificateStatus": "trusted"}, {"certificateStatus": "trusted"}]},
+ "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.update_system_changes(system)
+ self.assertEquals(system["changes"], {"controllerAddresses": ["192.168.1.5", "192.168.1.6"]})
+
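+ # An untrusted controller certificate should yield an acceptCertificate change.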
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {"managementPaths": ["192.168.1.5", "192.168.1.6"], "metaTags": [], "ip1": "192.168.1.5", "ip2": "192.168.1.6",
+ "controllers": [{"certificateStatus": "trusted"}, {"certificateStatus": "unknown"}]},
+ "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.update_system_changes(system)
+ self.assertEquals(system["changes"], {"acceptCertificate": True})
+
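+ # Existing meta tags with none requested should yield a removeAllTags change.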
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {"managementPaths": ["192.168.1.5", "192.168.1.6"], "metaTags": [{"key": "key", "value": "1"}], "ip1": "192.168.1.5",
+ "ip2": "192.168.1.6",
+ "controllers": [{"certificateStatus": "trusted"}, {"certificateStatus": "trusted"}]},
+ "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.update_system_changes(system)
+ self.assertEquals(system["changes"], {"removeAllTags": True})
+
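+ # Requested meta tags that are not yet applied should yield a metaTags change.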
+ system = {"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [{"key": "key", "value": "1"}], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True,
+ "accept_certificate": True,
+ "current_info": {"managementPaths": ["192.168.1.5", "192.168.1.6"], "metaTags": [], "ip1": "192.168.1.5", "ip2": "192.168.1.6",
+ "controllers": [{"certificateStatus": "trusted"}, {"certificateStatus": "trusted"}]},
+ "changes": {}, "updated_required": False, "failed": False, "discovered": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.update_system_changes(system)
+ self.assertEquals(system["changes"], {"metaTags": [{"key": "key", "value": "1"}]})
+
+ def test_add_system_pass(self):
+ """Validate add_system method."""
+ system = {"ssid": "1", "serial": "1", "password": "password", "meta_tags": [{"key": "key", "value": "1"}],
+ "controller_addresses": ["192.168.1.5", "192.168.1.6"], "accept_certificate": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, None), (200, None)]):
+ systems.add_system(system)
+
+ system = {"ssid": "1", "serial": "1", "password": "password", "meta_tags": [],
+ "controller_addresses": ["192.168.1.5", "192.168.1.6"], "accept_certificate": False}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, None), (200, None)]):
+ systems.add_system(system)
+
+ # Test warning situations, tests should still succeed
+ system = {"ssid": "1", "serial": "1", "password": "password", "meta_tags": [{"key": "key", "value": "1"}],
+ "controller_addresses": ["192.168.1.5", "192.168.1.6"], "accept_certificate": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[Exception(), Exception()]):
+ systems.add_system(system)
+
+ system = {"ssid": "1", "serial": "1", "password": "password", "meta_tags": [{"key": "key", "value": "1"}],
+ "controller_addresses": ["192.168.1.5", "192.168.1.6"], "accept_certificate": True}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, None), Exception()]):
+ systems.add_system(system)
+
+ def test_update_system_pass(self):
+ """Validate update_system method."""
+ system = {"ssid": "1", "changes": {}}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, None)):
+ systems.update_system(system)
+
+ system = {"ssid": "1", "changes": {}}
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ systems.update_system(system)
+
+ def test_remove_system_pass(self):
+ """Validate remove_system method."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, None)):
+ systems.remove_system("1")
+
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24",
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.set_password = lambda x: None
+ with mock.patch(self.TIME_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ systems.remove_system("1")
+
+ def test_apply_pass(self):
+ """Validate apply method."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": False,
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.is_embedded = lambda: False
+ systems.discover_array = lambda: None
+ systems.update_storage_systems_info = lambda: None
+ systems.update_system_changes = lambda x: None
+ systems.remove_system = lambda x: None
+ systems.add_system = lambda x: None
+ systems.update_system = lambda x: None
+ systems.systems = [{"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+ systems.systems_to_remove = ["5"]
+ systems.systems_to_add = [{"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None, "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"],
+ "embedded_available": False,
+ "accept_certificate": False, "current_info": {}, "changes": {}, "updated_required": False, "failed": False,
+ "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False,
+ "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+ systems.systems_to_update = [{"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None, "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"],
+ "embedded_available": False,
+ "accept_certificate": False, "current_info": {}, "changes": {}, "updated_required": False, "failed": False,
+ "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False,
+ "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+ with self.assertRaisesRegexp(AnsibleExitJson, "systems added.*?systems updated.*?system removed"):
+ systems.apply()
+
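+ # Undiscovered systems should cause apply to fail even when the other changes succeed.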
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": False,
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.is_embedded = lambda: False
+ systems.discover_array = lambda: None
+ systems.update_storage_systems_info = lambda: None
+ systems.update_system_changes = lambda x: None
+ systems.remove_system = lambda x: None
+ systems.add_system = lambda x: None
+ systems.update_system = lambda x: None
+ systems.systems = [{"ssid": "1", "serial": "1", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.5", "192.168.1.6"], "embedded_available": True, "accept_certificate": True,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None, "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False, "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+ systems.systems_to_remove = ["5"]
+ systems.systems_to_add = [{"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None, "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"],
+ "embedded_available": False,
+ "accept_certificate": False, "current_info": {}, "changes": {}, "updated_required": False, "failed": False,
+ "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False,
+ "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+ systems.systems_to_update = [{"ssid": "192.168.1.36", "serial": "", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None, "meta_tags": [], "controller_addresses": ["192.168.1.35", "192.168.1.36"],
+ "embedded_available": False,
+ "accept_certificate": False, "current_info": {}, "changes": {}, "updated_required": False, "failed": False,
+ "discovered": True},
+ {"ssid": "2", "serial": "2", "password": "password", "password_valid": None, "password_set": None,
+ "stored_password_valid": None,
+ "meta_tags": [], "controller_addresses": ["192.168.1.15", "192.168.1.16"], "embedded_available": False,
+ "accept_certificate": False,
+ "current_info": {}, "changes": {}, "updated_required": False, "failed": False, "discovered": True}]
+ systems.undiscovered_systems = ["5", "6"]
+ with self.assertRaises(AnsibleFailJson):
+ systems.apply()
+
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": False,
+ "systems": []})
+ systems = NetAppESeriesProxySystems()
+ systems.is_embedded = lambda: False
+ systems.discover_array = lambda: None
+ systems.update_storage_systems_info = lambda: None
+ systems.update_system_changes = lambda x: None
+ systems.remove_system = lambda x: None
+ systems.add_system = lambda x: None
+ systems.systems = []
+ systems.systems_to_remove = []
+ systems.systems_to_add = []
+ systems.systems_to_update = []
+ with self.assertRaisesRegexp(AnsibleExitJson, "No changes were made."):
+ systems.apply()
+
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": False,
+ "systems": []})
+ systems = NetAppESeriesProxySystems()
+ systems.is_embedded = lambda: False
+ systems.discover_array = lambda: None
+ systems.update_storage_systems_info = lambda: None
+ systems.update_system_changes = lambda x: None
+ systems.remove_system = lambda x: None
+ systems.add_system = lambda x: None
+ systems.systems = []
+ systems.systems_to_remove = []
+ systems.systems_to_add = []
+ systems.undiscovered_systems = ["5", "6"]
+ with self.assertRaises(AnsibleFailJson):
+ systems.apply()
+
+ def test_apply_fail(self):
+ """Validate apply method throws expected exceptions."""
+ self._set_args({"password": "password", "subnet_mask": "192.168.1.0/24", "add_discovered_systems": False,
+ "systems": [{"ssid": "1", "serial": "1"}, {"addresses": ["192.168.1.36"]}, {"serial": "2"}, {"serial": "5"}]})
+ systems = NetAppESeriesProxySystems()
+ systems.is_embedded = lambda: True
+ with self.assertRaisesRegexp(AnsibleFailJson, "Cannot add/remove storage systems to SANtricity Web Services Embedded instance."):
+ systems.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_storagepool.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_storagepool.py
new file mode 100644
index 000000000..181e983ee
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_storagepool.py
@@ -0,0 +1,715 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_storagepool import NetAppESeriesStoragePool
+from units.compat.mock import patch, PropertyMock
+
+
+class StoragePoolTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "http://localhost/devmgr/v2",
+ "ssid": "1",
+ "validate_certs": "no"}
+
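+ # Canned storage-pool, drive, and volume-candidate responses used as fixtures by the tests in this class.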
+ STORAGE_POOL_DATA = [{"raidLevel": "raidDiskPool", "volumeGroupRef": "04000000600A098000A4B28D000017805C7BD4D8",
+ "securityType": "capable",
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeGroupData": {"diskPoolData": {"reconstructionReservedDriveCount": 2}},
+ "totalRaidedSpace": "2735894167552", "name": "pool",
+ "id": "04000000600A098000A4B28D000017805C7BD4D8", "driveMediaType": "hdd"}]
+ DRIVES_DATA = [{'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ED1FF0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EB1930000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EAAE30000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ECB1F0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EB2930000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ECB0B0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EC6C70000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551E9BA70000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ED7CF0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ECB0F0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551E72870000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551E9DBB0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EAC230000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EA0BB0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': False, 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EAC4B0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551E7F2B0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EC9270000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EC97F0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ECBFF0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551E9ED30000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EA4CF0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551EA29F0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551ECDFB0000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'hdd', 'id': '010000005000C500551E99230000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'ssd', 'id': '010000005000C500551E9ED31000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'ssd', 'id': '010000005000C500551EA4CF2000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'ssd', 'id': '010000005000C500551EA29F3000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'ssd', 'id': '010000005000C500551ECDFB4000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sas', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'},
+ {'available': True, 'currentVolumeGroupRef': '0000000000000000000000000000000000000000',
+ 'driveMediaType': 'ssd', 'id': '010000005000C500551E99235000000000000000', 'fdeCapable': True,
+ 'hotSpare': False, 'invalidDriveData': False, 'nonRedundantAccess': False, 'pfa': False,
+ 'phyDriveType': 'sata', 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'rawCapacity': '300000000000', 'removed': False, 'status': 'optimal', 'uncertified': False,
+ 'usableCapacity': '299463129088'}]
+ RAID6_CANDIDATE_DRIVES = {"volumeCandidate": [
+ {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "898389368832", "usableSize": "898388459520",
+ "driveCount": 5, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": {
+ "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551E9ED30000000000000000"]}, "candidateSelectionType": "count",
+ "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", "dssPreallocEnabled": False,
+ "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None},
+ "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"},
+ {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "1197852491776", "usableSize": "1197851279360",
+ "driveCount": 6, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": {
+ "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000"]},
+ "candidateSelectionType": "count", "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas",
+ "dssPreallocEnabled": False, "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None},
+ "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"},
+ {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "1497315614720", "usableSize": "1497314099200",
+ "driveCount": 7, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": {
+ "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000",
+ "010000005000C500551ED1FF0000000000000000"]}, "candidateSelectionType": "count",
+ "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", "dssPreallocEnabled": False,
+ "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None},
+ "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"},
+ {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "1796778737664", "usableSize": "1796776919040",
+ "driveCount": 8, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": {
+ "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000",
+ "010000005000C500551ED1FF0000000000000000", "010000005000C500551EA29F0000000000000000"]},
+ "candidateSelectionType": "count", "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas",
+ "dssPreallocEnabled": False, "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None},
+ "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"},
+ {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "2096241860608", "usableSize": "2096239738880",
+ "driveCount": 9, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": {
+ "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000",
+ "010000005000C500551ED1FF0000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551ECDFB0000000000000000"]}, "candidateSelectionType": "count",
+ "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas", "dssPreallocEnabled": False,
+ "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None},
+ "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"},
+ {"raidLevel": "raid6", "trayLossProtection": False, "rawSize": "2395704983552", "usableSize": "2395702558720",
+ "driveCount": 10, "freeExtentRef": "0000000000000000000000000000000000000000", "driveRefList": {
+ "driveRef": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA4CF0000000000000000",
+ "010000005000C500551ED1FF0000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551ECDFB0000000000000000", "010000005000C500551E99230000000000000000"]},
+ "candidateSelectionType": "count", "spindleSpeedMatch": True, "spindleSpeed": 10000, "phyDriveType": "sas",
+ "dssPreallocEnabled": False, "securityType": "capable", "drawerLossProtection": False, "driveMediaType": "hdd",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeCandidateData": {"type": "traditional", "diskPoolVolumeCandidateData": None},
+ "driveBlockFormat": "allNative", "allocateReservedSpace": False, "securityLevel": "fde"}], "returnCode": "ok"}
+ EXPANSION_DDP_DRIVES_LIST = ["010000005000C500551ED1FF0000000000000000", "010000005000C500551E7F2B0000000000000000",
+ "010000005000C500551EC9270000000000000000", "010000005000C500551EC97F0000000000000000",
+ "010000005000C500551ECBFF0000000000000000", "010000005000C500551E9ED30000000000000000",
+ "010000005000C500551EA4CF0000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551ECDFB0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED31000000000000000", "010000005000C500551EA4CF2000000000000000",
+ "010000005000C500551EA29F3000000000000000", "010000005000C500551ECDFB4000000000000000",
+ "010000005000C500551E99235000000000000000"]
+ EXPANSION_DDP_DRIVE_DATA = {"returnCode": "ok", "candidates": [
+ {"drives": ["010000005000C500551E7F2B0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0",
+ "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "299463129088",
+ "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000"],
+ "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False,
+ "usableCapacity": "598926258176", "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000"], "trayLossProtection": False, "wastedCapacity": "0",
+ "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "898389387264",
+ "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000"],
+ "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False,
+ "usableCapacity": "1197852516352", "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551EA4CF0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0",
+ "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "1497315645440",
+ "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000"],
+ "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False,
+ "usableCapacity": "1796778774528", "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0",
+ "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "2096241903616",
+ "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000"],
+ "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False,
+ "usableCapacity": "2395705032704", "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551ECDFB0000000000000000"], "trayLossProtection": False, "wastedCapacity": "0",
+ "spindleSpeedMatch": True, "drawerLossProtection": False, "usableCapacity": "2695168161792",
+ "driveBlockFormat": "allNative"},
+ {"drives": ["010000005000C500551E7F2B0000000000000000", "010000005000C500551E99230000000000000000",
+ "010000005000C500551E9ED30000000000000000", "010000005000C500551EA29F0000000000000000",
+ "010000005000C500551EA4CF0000000000000000", "010000005000C500551EC9270000000000000000",
+ "010000005000C500551EC97F0000000000000000", "010000005000C500551ECBFF0000000000000000",
+ "010000005000C500551ECDFB0000000000000000", "010000005000C500551ED1FF0000000000000000"],
+ "trayLossProtection": False, "wastedCapacity": "0", "spindleSpeedMatch": True, "drawerLossProtection": False,
+ "usableCapacity": "2994631290880", "driveBlockFormat": "allNative"}]}
+
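+ # Dotted import paths used as mock.patch targets for the request helpers and the drives/storage_pool properties.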
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_storagepool.request"
+ NETAPP_REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.NetAppESeriesModule.request"
+ DRIVES_PROPERTY = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_storagepool.NetAppESeriesStoragePool.drives"
+ STORAGE_POOL_PROPERTY = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_storagepool.NetAppESeriesStoragePool.storage_pool"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def _initialize_dummy_instance(self, alt_args=None):
+ """Initialize a dummy instance of NetAppESeriesStoragePool for the purpose of testing individual methods."""
+ args = {"state": "absent", "name": "storage_pool"}
+ if alt_args:
+ args.update(alt_args)
+ self._set_args(args)
+ return NetAppESeriesStoragePool()
+
+ def test_drives_fail(self):
+ """Verify exception is thrown."""
+
+ with patch(self.NETAPP_REQUEST_FUNC) as netapp_request:
+ netapp_request.return_value = Exception()
+ storagepool = self._initialize_dummy_instance()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to fetch disk drives."):
+ drives = storagepool.drives
+
+ def test_available_drives(self):
+ """Verify all drives returned are available"""
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance()
+ self.assertEqual(storagepool.available_drives,
+ ['010000005000C500551ED1FF0000000000000000', '010000005000C500551E7F2B0000000000000000',
+ '010000005000C500551EC9270000000000000000', '010000005000C500551EC97F0000000000000000',
+ '010000005000C500551ECBFF0000000000000000', '010000005000C500551E9ED30000000000000000',
+ '010000005000C500551EA4CF0000000000000000', '010000005000C500551EA29F0000000000000000',
+ '010000005000C500551ECDFB0000000000000000', '010000005000C500551E99230000000000000000',
+ '010000005000C500551E9ED31000000000000000', '010000005000C500551EA4CF2000000000000000',
+ '010000005000C500551EA29F3000000000000000', '010000005000C500551ECDFB4000000000000000',
+ '010000005000C500551E99235000000000000000'])
+
+ def test_available_drive_types(self):
+ """Verify all drive types are returned in most common first order."""
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance()
+ self.assertEqual(storagepool.available_drive_types[0], "hdd")
+ self.assertEqual(storagepool.available_drive_types[1], "ssd")
+
+ def test_available_drive_interface_types(self):
+ """Verify all interface types are returned in most common first order."""
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance()
+ self.assertEqual(storagepool.available_drive_interface_types[0], "sas")
+ self.assertEqual(storagepool.available_drive_interface_types[1], "sata")
+
+ def test_storage_pool_drives(self):
+ """Verify storage pool drive collection."""
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "criteria_drive_count": "12", "raid_level": "raidDiskPool"})
+ storagepool.pool_detail = self.STORAGE_POOL_DATA[0]
+ self.assertEqual(storagepool.storage_pool_drives, [
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EB1930000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EAAE30000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551ECB1F0000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EB2930000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551ECB0B0000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EC6C70000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551E9BA70000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551ED7CF0000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551ECB0F0000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551E72870000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551E9DBB0000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EAC230000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EA0BB0000000000000000'},
+ {'available': False, 'pfa': False, 'driveMediaType': 'hdd', 'uncertified': False,
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'}, 'fdeCapable': True,
+ 'currentVolumeGroupRef': '04000000600A098000A4B28D000017805C7BD4D8', 'invalidDriveData': False,
+ 'nonRedundantAccess': False, 'hotSpare': False, 'status': 'optimal', 'rawCapacity': '300000000000',
+ 'usableCapacity': '299463129088', 'phyDriveType': 'sas', 'removed': False,
+ 'id': '010000005000C500551EAC4B0000000000000000'}])
+
+ def test_get_ddp_capacity(self):
+ """Evaluate returned capacity from get_ddp_capacity method."""
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "criteria_drive_count": "12", "raid_level": "raidDiskPool"})
+ storagepool.pool_detail = self.STORAGE_POOL_DATA[0]
+ self.assertAlmostEqual(storagepool.get_ddp_capacity(self.EXPANSION_DDP_DRIVES_LIST), 6038680353645,
+ places=-2) # Allows for python version/architecture computational differences
+
+ def test_get_candidate_drives(self):
+ """Verify correct candidate list is returned."""
+ with patch(self.NETAPP_REQUEST_FUNC) as netapp_request:
+ netapp_request.return_value = (200, self.RAID6_CANDIDATE_DRIVES)
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "raid6_vg", "criteria_drive_count": "6", "raid_level": "raid6"})
+ self.assertEqual(storagepool.get_candidate_drives(),
+ {'candidateSelectionType': 'count', 'driveMediaType': 'hdd',
+ 'protectionInformationCapabilities': {'protectionInformationCapable': True,
+ 'protectionType': 'type2Protection'},
+ 'dssPreallocEnabled': False, 'phyDriveType': 'sas', 'allocateReservedSpace': False,
+ 'trayLossProtection': False, 'raidLevel': 'raid6', 'spindleSpeed': 10000,
+ 'securityType': 'capable', 'securityLevel': 'fde', 'spindleSpeedMatch': True,
+ 'driveBlockFormat': 'allNative', 'protectionInformationCapable': False,
+ 'freeExtentRef': '0000000000000000000000000000000000000000', 'driveCount': 6,
+ 'driveRefList': {'driveRef': ['010000005000C500551E7F2B0000000000000000',
+ '010000005000C500551EC9270000000000000000',
+ '010000005000C500551EC97F0000000000000000',
+ '010000005000C500551ECBFF0000000000000000',
+ '010000005000C500551E9ED30000000000000000',
+ '010000005000C500551EA4CF0000000000000000']},
+ 'rawSize': '1197852491776', 'usableSize': '1197851279360',
+ 'drawerLossProtection': False,
+ 'volumeCandidateData': {'type': 'traditional', 'diskPoolVolumeCandidateData': None}})
+
+ def test_get_expansion_candidate_drives(self):
+ """Verify correct drive list is returned"""
+ with patch(self.NETAPP_REQUEST_FUNC) as netapp_request:
+ netapp_request.return_value = (200, self.EXPANSION_DDP_DRIVE_DATA)
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "criteria_drive_count": "20", "raid_level": "raidDiskPool"})
+ storagepool.pool_detail = self.STORAGE_POOL_DATA[0]
+ self.assertEqual(storagepool.get_expansion_candidate_drives(), [
+ {'drawerLossProtection': False, 'trayLossProtection': False,
+ 'drives': ['010000005000C500551E7F2B0000000000000000', '010000005000C500551E99230000000000000000',
+ '010000005000C500551E9ED30000000000000000', '010000005000C500551EA29F0000000000000000',
+ '010000005000C500551EA4CF0000000000000000', '010000005000C500551EC9270000000000000000'],
+ 'spindleSpeedMatch': True, 'driveBlockFormat': 'allNative', 'usableCapacity': '1796778774528',
+ 'wastedCapacity': '0'}])
+
+ def test_get_maximum_reserve_drive_count(self):
+ """Ensure maximum reserve drive count is accurately calculated."""
+ with patch(self.NETAPP_REQUEST_FUNC) as netapp_request:
+ netapp_request.return_value = (200, self.EXPANSION_DDP_DRIVE_DATA)
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "criteria_drive_count": "20", "raid_level": "raidDiskPool"})
+ storagepool.pool_detail = self.STORAGE_POOL_DATA[0]
+ self.assertEqual(storagepool.get_maximum_reserve_drive_count(), 5)
+
+ def test_apply_check_mode_unchange(self):
+ """Verify that the changes are appropriately determined."""
+ # Absent storage pool required to be absent
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': False"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = {}
+ storagepool = self._initialize_dummy_instance(
+ {"state": "absent", "name": "not-a-pool", "erase_secured_drives": False,
+ "criteria_drive_count": "14", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.apply()
+
+ # Present storage pool with no changes
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': False"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "erase_secured_drives": False,
+ "criteria_drive_count": "14", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.apply()
+
+ def test_apply_check_mode_change(self):
+ """Verify that the changes are appropriately determined."""
+ # Remove absent storage pool
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "absent", "name": "pool", "erase_secured_drives": False, "criteria_drive_count": "14",
+ "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.apply()
+
+ # Expand present storage pool
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "erase_secured_drives": False,
+ "criteria_drive_count": "15", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.expand_storage_pool = lambda check_mode: (True, 100)
+ storagepool.migrate_raid_level = lambda check_mode: False
+ storagepool.secure_storage_pool = lambda check_mode: False
+ storagepool.set_reserve_drive_count = lambda check_mode: False
+ storagepool.apply()
+
+ # Migrate present storage pool raid level
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "erase_secured_drives": False,
+ "criteria_drive_count": "15", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.expand_storage_pool = lambda check_mode: (False, 0)
+ storagepool.migrate_raid_level = lambda check_mode: True
+ storagepool.secure_storage_pool = lambda check_mode: False
+ storagepool.set_reserve_drive_count = lambda check_mode: False
+ storagepool.apply()
+
+ # Secure present storage pool
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "erase_secured_drives": False,
+ "criteria_drive_count": "15", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.expand_storage_pool = lambda check_mode: (False, 0)
+ storagepool.migrate_raid_level = lambda check_mode: False
+ storagepool.secure_storage_pool = lambda check_mode: True
+ storagepool.set_reserve_drive_count = lambda check_mode: False
+ storagepool.apply()
+
+ # Change present storage pool reserve drive count
+ with self.assertRaisesRegexp(AnsibleExitJson, "'changed': True"):
+ with patch(self.DRIVES_PROPERTY, new_callable=PropertyMock) as drives:
+ drives.return_value = self.DRIVES_DATA
+ with patch(self.STORAGE_POOL_PROPERTY, new_callable=PropertyMock) as storage_pool:
+ storage_pool.return_value = self.STORAGE_POOL_DATA[0]
+ storagepool = self._initialize_dummy_instance(
+ {"state": "present", "name": "pool", "erase_secured_drives": False,
+ "criteria_drive_count": "15", "raid_level": "raidDiskPool"})
+ storagepool.module.check_mode = True
+ storagepool.is_drive_count_valid = lambda x: True
+ storagepool.expand_storage_pool = lambda check_mode: (False, 0)
+ storagepool.migrate_raid_level = lambda check_mode: False
+ storagepool.secure_storage_pool = lambda check_mode: False
+ storagepool.set_reserve_drive_count = lambda check_mode: True
+ storagepool.apply()
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_syslog.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_syslog.py
new file mode 100644
index 000000000..b36278bfe
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_syslog.py
@@ -0,0 +1,128 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_syslog import NetAppESeriesSyslog
+from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class SyslogTest(ModuleTestCase):
+ REQUIRED_PARAMS = {
+ "api_username": "rw",
+ "api_password": "password",
+ "api_url": "http://localhost",
+ }
+ REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_syslog.NetAppESeriesSyslog.request'
+ BASE_REQ_FUNC = 'ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity.request'
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_test_configuration_fail(self):
+ """Validate test_configuration fails when request exception is thrown."""
+ initial = {"state": "present",
+ "ssid": "1",
+ "address": "192.168.1.1",
+ "port": "514",
+ "protocol": "udp",
+ "components": ["auditLog"]}
+ self._set_args(initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesSyslog()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, r"We failed to send test message!"):
+ with mock.patch(self.REQ_FUNC, return_value=Exception()):
+ syslog.test_configuration(self.REQUIRED_PARAMS)
+
+ def test_update_configuration_record_match_pass(self):
+ """Verify existing syslog server record match does not issue update request."""
+ initial = {"state": "present",
+ "ssid": "1",
+ "address": "192.168.1.1",
+ "port": "514",
+ "protocol": "udp",
+ "components": ["auditLog"]}
+ expected = [{"id": "123456",
+ "serverAddress": "192.168.1.1",
+ "port": 514,
+ "protocol": "udp",
+ "components": [{"type": "auditLog"}]}]
+
+ self._set_args(initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesSyslog()
+
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, expected), (200, None)]):
+ updated = syslog.update_configuration()
+ self.assertFalse(updated)
+
+ def test_update_configuration_record_partial_match_pass(self):
+ """Verify existing syslog server record partial match results in an update request."""
+ initial = {"state": "present",
+ "ssid": "1",
+ "address": "192.168.1.1",
+ "port": "514",
+ "protocol": "tcp",
+ "components": ["auditLog"]}
+ expected = [{"id": "123456",
+ "serverAddress": "192.168.1.1",
+ "port": 514,
+ "protocol": "udp",
+ "components": [{"type": "auditLog"}]}]
+
+ self._set_args(initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesSyslog()
+
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, expected), (200, None)]):
+ updated = syslog.update_configuration()
+ self.assertTrue(updated)
+
+ def test_update_configuration_record_no_match_pass(self):
+ """Verify existing syslog server record partial match results in an update request."""
+ initial = {"state": "present",
+ "ssid": "1",
+ "address": "192.168.1.1",
+ "port": "514",
+ "protocol": "tcp",
+ "components": ["auditLog"]}
+ expected = [{"id": "123456",
+ "serverAddress": "192.168.1.100",
+ "port": 514,
+ "protocol": "udp",
+ "components": [{"type": "auditLog"}]}]
+
+ self._set_args(initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesSyslog()
+
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, expected), (200, dict(id=1234))]):
+ updated = syslog.update_configuration()
+ self.assertTrue(updated)
+
+ def test_update_configuration_record_no_match_defaults_pass(self):
+ """Verify existing syslog server record partial match results in an update request."""
+ initial = {"state": "present",
+ "ssid": "1",
+ "address": "192.168.1.1",
+ "port": "514",
+ "protocol": "tcp",
+ "components": ["auditLog"]}
+ expected = [{"id": "123456",
+ "serverAddress": "192.168.1.100",
+ "port": 514,
+ "protocol": "udp",
+ "components": [{"type": "auditLog"}]}]
+
+ self._set_args(initial)
+ with mock.patch(self.BASE_REQ_FUNC, side_effect=[(200, {"version": "04.00.00.00"}), (200, {"runningAsProxy": False})]):
+ syslog = NetAppESeriesSyslog()
+
+ with mock.patch(self.REQ_FUNC, side_effect=[(200, expected), (200, dict(id=1234))]):
+ updated = syslog.update_configuration()
+ self.assertTrue(updated)
diff --git a/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_volume.py b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_volume.py
new file mode 100644
index 000000000..4bf547b38
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/tests/unit/modules/test_na_santricity_volume.py
@@ -0,0 +1,864 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_volume import NetAppESeriesVolume
+from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
+from units.compat import mock
+
+
+class NetAppESeriesVolumeTest(ModuleTestCase):
+ REQUIRED_PARAMS = {"api_username": "username",
+ "api_password": "password",
+ "api_url": "http://localhost/devmgr/v2",
+ "ssid": "1",
+ "validate_certs": "no"}
+
+ THIN_VOLUME_RESPONSE = [{"capacity": "1288490188800",
+ "volumeRef": "3A000000600A098000A4B28D000010475C405428",
+ "status": "optimal",
+ "protectionType": "type1Protection",
+ "maxVirtualCapacity": "281474976710656",
+ "initialProvisionedCapacity": "4294967296",
+ "currentProvisionedCapacity": "4294967296",
+ "provisionedCapacityQuota": "1305670057984",
+ "growthAlertThreshold": 85,
+ "expansionPolicy": "automatic",
+ "flashCached": False,
+ "metadata": [{"key": "workloadId", "value": "4200000001000000000000000000000000000000"},
+ {"key": "volumeTypeId", "value": "volume"}],
+ "dataAssurance": True,
+ "segmentSize": 131072,
+ "diskPool": True,
+ "listOfMappings": [],
+ "mapped": False,
+ "currentControllerId": "070000000000000000000001",
+ "cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
+ "readAheadMultiplier": 0},
+ "name": "thin_volume",
+ "id": "3A000000600A098000A4B28D000010475C405428"}]
+ VOLUME_GET_RESPONSE = [{"offline": False,
+ "raidLevel": "raid6",
+ "capacity": "214748364800",
+ "reconPriority": 1,
+ "segmentSize": 131072,
+ "volumeRef": "02000000600A098000A4B9D100000F095C2F7F31",
+ "status": "optimal",
+ "protectionInformationCapable": False,
+ "protectionType": "type0Protection",
+ "diskPool": True,
+ "flashCached": False,
+ "metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
+ {"key": "volumeTypeId", "value": "Clare"}],
+ "dataAssurance": False,
+ "currentControllerId": "070000000000000000000002",
+ "cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
+ "readAheadMultiplier": 0},
+ "thinProvisioned": False,
+ "totalSizeInBytes": "214748364800",
+ "name": "Matthew",
+ "id": "02000000600A098000A4B9D100000F095C2F7F31"},
+ {"offline": False,
+ "raidLevel": "raid6",
+ "capacity": "107374182400",
+ "reconPriority": 1,
+ "segmentSize": 131072,
+ "volumeRef": "02000000600A098000A4B28D00000FBE5C2F7F26",
+ "status": "optimal",
+ "protectionInformationCapable": False,
+ "protectionType": "type0Protection",
+ "diskPool": True,
+ "flashCached": False,
+ "metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
+ {"key": "volumeTypeId", "value": "Samantha"}],
+ "dataAssurance": False,
+ "currentControllerId": "070000000000000000000001",
+ "cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
+ "readAheadMultiplier": 0},
+ "thinProvisioned": False,
+ "totalSizeInBytes": "107374182400",
+ "name": "Samantha",
+ "id": "02000000600A098000A4B28D00000FBE5C2F7F26"},
+ {"offline": False,
+ "raidLevel": "raid6",
+ "capacity": "107374182400",
+ "segmentSize": 131072,
+ "volumeRef": "02000000600A098000A4B9D100000F0B5C2F7F40",
+ "status": "optimal",
+ "protectionInformationCapable": False,
+ "protectionType": "type0Protection",
+ "volumeGroupRef": "04000000600A098000A4B9D100000F085C2F7F26",
+ "diskPool": True,
+ "flashCached": False,
+ "metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
+ {"key": "volumeTypeId", "value": "Micah"}],
+ "dataAssurance": False,
+ "currentControllerId": "070000000000000000000002",
+ "cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
+ "readAheadMultiplier": 0},
+ "thinProvisioned": False,
+ "totalSizeInBytes": "107374182400",
+ "name": "Micah",
+ "id": "02000000600A098000A4B9D100000F0B5C2F7F40"}]
+ STORAGE_POOL_GET_RESPONSE = [{"offline": False,
+ "raidLevel": "raidDiskPool",
+ "volumeGroupRef": "04000000600A",
+ "securityType": "capable",
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeGroupData": {"type": "diskPool",
+ "diskPoolData": {"reconstructionReservedDriveCount": 1,
+ "reconstructionReservedAmt": "296889614336",
+ "reconstructionReservedDriveCountCurrent": 1,
+ "poolUtilizationWarningThreshold": 0,
+ "poolUtilizationCriticalThreshold": 85,
+ "poolUtilizationState": "utilizationOptimal",
+ "unusableCapacity": "0",
+ "degradedReconstructPriority": "high",
+ "criticalReconstructPriority": "highest",
+ "backgroundOperationPriority": "low",
+ "allocGranularity": "4294967296"}},
+ "reservedSpaceAllocated": False,
+ "securityLevel": "fde",
+ "usedSpace": "863288426496",
+ "totalRaidedSpace": "2276332666880",
+ "raidStatus": "optimal",
+ "freeSpace": "1413044240384",
+ "drivePhysicalType": "sas",
+ "driveMediaType": "hdd",
+ "diskPool": True,
+ "id": "04000000600A098000A4B9D100000F085C2F7F26",
+ "name": "employee_data_storage_pool"},
+ {"offline": False,
+ "raidLevel": "raid1",
+ "volumeGroupRef": "04000000600A098000A4B28D00000FBD5C2F7F19",
+ "state": "complete",
+ "securityType": "capable",
+ "drawerLossProtection": False,
+ "protectionInformationCapable": False,
+ "protectionInformationCapabilities": {"protectionInformationCapable": True,
+ "protectionType": "type2Protection"},
+ "volumeGroupData": {"type": "unknown", "diskPoolData": None},
+ "reservedSpaceAllocated": False,
+ "securityLevel": "fde",
+ "usedSpace": "322122547200",
+ "totalRaidedSpace": "598926258176",
+ "raidStatus": "optimal",
+ "freeSpace": "276803710976",
+ "drivePhysicalType": "sas",
+ "driveMediaType": "hdd",
+ "diskPool": False,
+ "id": "04000000600A098000A4B28D00000FBD5C2F7F19",
+ "name": "database_storage_pool"}]
+
+ GET_LONG_LIVED_OPERATION_RESPONSE = [
+ {"returnCode": "ok",
+ "longLivedOpsProgress": [
+ {"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
+ "format": None, "volCreation": None, "volDeletion": None},
+ {"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
+ "volCreation": None, "volDeletion": None}]},
+ {"returnCode": "ok",
+ "longLivedOpsProgress": [
+ {"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
+ "format": None, "volCreation": None, "volDeletion": None},
+ {"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
+ "volCreation": None, "volDeletion": None}]},
+ {"returnCode": "ok",
+ "longLivedOpsProgress": [
+ {"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
+ "format": None, "volCreation": None, "volDeletion": None},
+ {"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
+ "volCreation": None, "volDeletion": None}]},
+ {"returnCode": "ok",
+ "longLivedOpsProgress": [
+ {"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
+ "format": None, "volCreation": None, "volDeletion": None},
+ {"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
+ "init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
+ "volCreation": None, "volDeletion": None}]}]
+
+ WORKLOAD_GET_RESPONSE = [{"id": "4200000001000000000000000000000000000000", "name": "general_workload_1",
+ "workloadAttributes": [{"key": "profileId", "value": "Other_1"}]},
+ {"id": "4200000002000000000000000000000000000000", "name": "employee_data",
+ "workloadAttributes": [{"key": "use", "value": "EmployeeData"},
+ {"key": "location", "value": "ICT"},
+ {"key": "private", "value": "public"},
+ {"key": "profileId", "value": "ansible_workload_1"}]},
+ {"id": "4200000003000000000000000000000000000000", "name": "customer_database",
+ "workloadAttributes": [{"key": "use", "value": "customer_information"},
+ {"key": "location", "value": "global"},
+ {"key": "profileId", "value": "ansible_workload_2"}]},
+ {"id": "4200000004000000000000000000000000000000", "name": "product_database",
+ "workloadAttributes": [{"key": "use", "value": "production_information"},
+ {"key": "security", "value": "private"},
+ {"key": "location", "value": "global"},
+ {"key": "profileId", "value": "ansible_workload_4"}]}]
+
+ REQUEST_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_volume.NetAppESeriesVolume.request"
+ GET_VOLUME_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_volume.NetAppESeriesVolume.get_volume"
+ SLEEP_FUNC = "ansible_collections.netapp_eseries.santricity.plugins.modules.na_santricity_volume.time.sleep"
+
+ def _set_args(self, args=None):
+ module_args = self.REQUIRED_PARAMS.copy()
+ if args is not None:
+ module_args.update(args)
+ set_module_args(module_args)
+
+ def test_module_arguments_pass(self):
+ """Ensure valid arguments successful create a class instance."""
+ arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 10},
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1024,
+ "thin_volume_growth_alert_threshold": 99},
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
+ "thin_provision": True, "thin_volume_repo_size": 64},
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "kb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 67108864}]
+
+ # validate size normalization
+ for arg_set in arg_sets:
+ self._set_args(arg_set)
+ volume_object = NetAppESeriesVolume()
+
+ self.assertEqual(volume_object.size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
+ self.assertEqual(volume_object.thin_volume_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["thin_volume_repo_size"]))
+ self.assertEqual(volume_object.thin_volume_expansion_policy, "automatic")
+ if "thin_volume_max_repo_size" not in arg_set.keys():
+ self.assertEqual(volume_object.thin_volume_max_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
+ else:
+ self.assertEqual(volume_object.thin_volume_max_repo_size_b,
+ volume_object.convert_to_aligned_bytes(arg_set["thin_volume_max_repo_size"]))
+
+ # validate metadata form
+ self._set_args(
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10, "workload_name": "workload1",
+ "metadata": {"availability": "public", "security": "low"}})
+ volume_object = NetAppESeriesVolume()
+ for entry in volume_object.metadata:
+ self.assertTrue(entry in [{'value': 'low', 'key': 'security'}, {'value': 'public', 'key': 'availability'}])
+
+ def test_module_arguments_fail(self):
+ """Ensure invalid arguments values do not create a class instance."""
+ arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
+ "thin_provision": True, "thin_volume_repo_size": 260},
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "tb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 10},
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 9},
+ {"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 100}]
+
+ for arg_set in arg_sets:
+ with self.assertRaises(AnsibleFailJson):
+ self._set_args(arg_set)
+ print(arg_set)
+ volume_object = NetAppESeriesVolume()
+
+ def test_get_volume_pass(self):
+ """Evaluate the get_volume method."""
+ with mock.patch(self.REQUEST_FUNC,
+ side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ self.assertEqual(volume_object.get_volume(),
+ [entry for entry in self.VOLUME_GET_RESPONSE if entry["name"] == "Matthew"][0])
+
+ with mock.patch(self.REQUEST_FUNC,
+ side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
+ self._set_args({"state": "present", "name": "NotAVolume", "storage_pool_name": "pool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ self.assertEqual(volume_object.get_volume(), {})
+
+ def test_get_volume_fail(self):
+ """Evaluate the get_volume exception paths."""
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thick volumes."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_volume()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thin volumes."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.VOLUME_GET_RESPONSE), Exception()]):
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_volume()
+
+ def tests_wait_for_volume_availability_pass(self):
+ """Ensure wait_for_volume_availability completes as expected."""
+ self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
+ "wait_for_initialization": True})
+ volume_object = NetAppESeriesVolume()
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.GET_VOLUME_FUNC, side_effect=[False, False, True]):
+ volume_object.wait_for_volume_availability()
+
+ def tests_wait_for_volume_availability_fail(self):
+ """Ensure wait_for_volume_availability throws the expected exceptions."""
+ self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
+ "wait_for_initialization": True})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_volume = lambda: False
+ with self.assertRaisesRegexp(AnsibleFailJson, "Timed out waiting for the volume"):
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ volume_object.wait_for_volume_availability()
+
+ def tests_wait_for_volume_action_pass(self):
+ """Ensure wait_for_volume_action completes as expected."""
+ self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
+ "wait_for_initialization": True})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315D494C6F",
+ "storageVolumeRef": "02000000600A098000A4B9D1000037315DXXXXXX"}
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
+ (200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
+ (200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
+ (200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
+ volume_object.wait_for_volume_action()
+
+ self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
+ "wait_for_initialization": True})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
+ "storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
+ (200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
+ (200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
+ (200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
+ volume_object.wait_for_volume_action()
+
+ def tests_wait_for_volume_action_fail(self):
+ """Ensure wait_for_volume_action throws the expected exceptions."""
+ self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
+ "wait_for_initialization": True})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
+ "storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
+ with mock.patch(self.SLEEP_FUNC, return_value=None):
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get volume expansion progress."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.wait_for_volume_action()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Expansion action failed to complete."):
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0])):
+ volume_object.wait_for_volume_action(timeout=300)
+
+ def test_get_storage_pool_pass(self):
+ """Evaluate the get_storage_pool method."""
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, self.STORAGE_POOL_GET_RESPONSE)):
+ self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool",
+ "size": 100})
+ volume_object = NetAppESeriesVolume()
+ self.assertEqual(volume_object.get_storage_pool(), [entry for entry in self.STORAGE_POOL_GET_RESPONSE if
+ entry["name"] == "employee_data_storage_pool"][0])
+
+ self._set_args(
+ {"state": "present", "name": "NewVolume", "storage_pool_name": "NotAStoragePool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ self.assertEqual(volume_object.get_storage_pool(), {})
+
+ def test_get_storage_pool_fail(self):
+ """Evaluate the get_storage_pool exception paths."""
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of storage pools."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_storage_pool()
+
+ def test_check_storage_pool_sufficiency_pass(self):
+ """Ensure passing logic."""
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
+ if entry["name"] == "employee_data_storage_pool"][0]
+ volume_object.check_storage_pool_sufficiency()
+
+ def test_check_storage_pool_sufficiency_fail(self):
+ """Validate exceptions are thrown for insufficient storage pool resources."""
+ self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
+ "thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 10})
+ volume_object = NetAppESeriesVolume()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "Requested storage pool"):
+ volume_object.check_storage_pool_sufficiency()
+
+ with self.assertRaisesRegexp(AnsibleFailJson,
+ "Thin provisioned volumes can only be created on raid disk pools."):
+ volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
+ if entry["name"] == "database_storage_pool"][0]
+ volume_object.volume_detail = {}
+ volume_object.check_storage_pool_sufficiency()
+
+ with self.assertRaisesRegexp(AnsibleFailJson, "requires the storage pool to be DA-compatible."):
+ volume_object.pool_detail = {"diskPool": True,
+ "protectionInformationCapabilities": {"protectionType": "type0Protection",
+ "protectionInformationCapable": False}}
+ volume_object.volume_detail = {}
+ volume_object.data_assurance_enabled = True
+ volume_object.check_storage_pool_sufficiency()
+
+ volume_object.pool_detail = {"diskPool": True,
+ "protectionInformationCapabilities": {"protectionType": "type2Protection",
+ "protectionInformationCapable": True}}
+ volume_object.check_storage_pool_sufficiency()
+
+ self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
+ "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ with self.assertRaisesRegexp(AnsibleFailJson,
+ "Not enough storage pool free space available for the volume's needs."):
+ volume_object.pool_detail = {"freeSpace": 10, "diskPool": True,
+ "protectionInformationCapabilities": {"protectionType": "type2Protection",
+ "protectionInformationCapable": True}}
+ volume_object.volume_detail = {"totalSizeInBytes": 100}
+ volume_object.data_assurance_enabled = True
+ volume_object.size_b = 1
+ volume_object.check_storage_pool_sufficiency()
+
+ def test_update_workload_tags_pass(self):
+ """Validate updating workload tags."""
+ test_sets = [[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100}, False],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "employee_data"}, False],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "customer_database",
+ "metadata": {"use": "customer_information", "location": "global"}}, False],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "customer_database",
+ "metadata": {"use": "customer_information"}}, True],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "customer_database",
+ "metadata": {"use": "customer_information", "location": "local"}}, True],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "customer_database",
+ "metadata": {"use": "customer_information", "location": "global", "importance": "no"}}, True],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "newWorkload",
+ "metadata": {"for_testing": "yes"}}, True],
+ [{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "newWorkload"}, True]]
+
+ for test in test_sets:
+ self._set_args(test[0])
+ volume_object = NetAppESeriesVolume()
+
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), (200, {"id": 1})]):
+ self.assertEqual(volume_object.update_workload_tags(), test[1])
+
+ def test_update_workload_tags_fail(self):
+ """Validate updating workload tags fails appropriately."""
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "employee_data"})
+ volume_object = NetAppESeriesVolume()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage array workload tags."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.update_workload_tags()
+
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "employee_data", "metadata": {"key": "not-use", "value": "EmployeeData"}})
+ volume_object = NetAppESeriesVolume()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
+ volume_object.update_workload_tags()
+
+ self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
+ "workload_name": "employee_data2", "metadata": {"key": "use", "value": "EmployeeData"}})
+ volume_object = NetAppESeriesVolume()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
+ with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
+ volume_object.update_workload_tags()
+
+ def test_get_volume_property_changes_pass(self):
+ """Verify correct dictionary is returned"""
+
+ # no property changes
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True,
+ "read_ahead_enable": True, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
+ "readAheadMultiplier": 1}, "flashCached": True,
+ "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(), dict())
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True,
+ "read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
+ "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
+ "readAheadMultiplier": 1},
+ "flashCached": True, "growthAlertThreshold": "90",
+ "expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(), dict())
+
+ # property changes
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True,
+ "read_ahead_enable": True, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": False, "readCacheEnable": False, "writeCacheEnable": True,
+ "readAheadMultiplier": 1}, "flashCached": True,
+ "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(),
+ {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
+ 'flashCache': True})
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": False,
+ "read_ahead_enable": True, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": False,
+ "readAheadMultiplier": 1}, "flashCached": True,
+ "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(),
+ {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
+ 'flashCache': True})
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
+ "read_ahead_enable": True, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
+ "readAheadMultiplier": 1}, "flashCached": False,
+ "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(),
+ {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True, "cacheWithoutBatteries": True},
+ 'flashCache': True})
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
+ "read_ahead_enable": False, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
+ "readAheadMultiplier": 1}, "flashCached": False,
+ "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(), {"metaTags": [],
+ 'cacheSettings': {'readCacheEnable': True,
+ 'writeCacheEnable': True,
+ 'readAheadEnable': False,
+ "cacheWithoutBatteries": True},
+ 'flashCache': True})
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True,
+ "read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
+ "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"metadata": [],
+ "cacheSettings": {"cwob": True, "readCacheEnable": True, "writeCacheEnable": True,
+ "readAheadMultiplier": 1},
+ "flashCached": True, "growthAlertThreshold": "95",
+ "expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
+ self.assertEqual(volume_object.get_volume_property_changes(),
+ {"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
+ 'growthAlertThreshold': 90, 'flashCache': True})
+
+ def test_get_volume_property_changes_fail(self):
+ """Verify correct exception is thrown"""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
+ "read_cache_enable": True, "write_cache_enable": True, "read_ahead_enable": True, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {
+ "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True, "readAheadMultiplier": 1},
+ "flashCached": True, "segmentSize": str(512 * 1024)}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Existing volume segment size is"):
+ volume_object.get_volume_property_changes()
+
+ def test_get_expand_volume_changes_pass(self):
+ """Verify expansion changes."""
+ # thick volumes
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": False}
+ self.assertEqual(volume_object.get_expand_volume_changes(),
+ {"sizeUnit": "bytes", "expansionSize": 100 * 1024 * 1024 * 1024})
+
+ # thin volumes
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
+ "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": True,
+ "expansionPolicy": "automatic",
+ "provisionedCapacityQuota": str(1000 * 1024 * 1024 * 1024)}
+ self.assertEqual(volume_object.get_expand_volume_changes(),
+ {"sizeUnit": "bytes", "newVirtualSize": 100 * 1024 * 1024 * 1024})
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
+ "thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
+ "expansionPolicy": "automatic",
+ "provisionedCapacityQuota": str(500 * 1024 * 1024 * 1024)}
+ self.assertEqual(volume_object.get_expand_volume_changes(),
+ {"sizeUnit": "bytes", "newRepositorySize": 1000 * 1024 * 1024 * 1024})
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 504, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
+ "expansionPolicy": "manual",
+ "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
+ self.assertEqual(volume_object.get_expand_volume_changes(),
+ {"sizeUnit": "bytes", "newRepositorySize": 504 * 1024 * 1024 * 1024})
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 756, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
+ "expansionPolicy": "manual",
+ "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
+ self.assertEqual(volume_object.get_expand_volume_changes(),
+ {"sizeUnit": "bytes", "newRepositorySize": 756 * 1024 * 1024 * 1024})
+
+ def test_get_expand_volume_changes_fail(self):
+ """Verify exceptions are thrown."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(1000 * 1024 * 1024 * 1024)}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Reducing the size of volumes is not permitted."):
+ volume_object.get_expand_volume_changes()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 502, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
+ "expansionPolicy": "manual",
+ "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
+ with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
+ volume_object.get_expand_volume_changes()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
+ "expansionPolicy": "manual",
+ "currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
+ with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
+ volume_object.get_expand_volume_changes()
+
+ def test_create_volume_pass(self):
+ """Verify volume creation."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ volume_object.create_volume()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ volume_object.create_volume()
+
+ def test_create_volume_fail(self):
+ """Verify exceptions thrown."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create volume."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.create_volume()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create thin volume."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.create_volume()
+
+ def test_update_volume_properties_pass(self):
+ """verify property update."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ volume_object.wait_for_volume_availability = lambda: None
+        volume_object.get_volume = lambda: {"id": "12345"}
+ volume_object.get_volume_property_changes = lambda: {
+ 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
+ 'flashCached': True}
+ volume_object.workload_id = "4200000001000000000000000000000000000000"
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ self.assertTrue(volume_object.update_volume_properties())
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ volume_object.wait_for_volume_availability = lambda: None
+        volume_object.get_volume = lambda: {"id": "12345"}
+ volume_object.get_volume_property_changes = lambda: {
+ 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
+ 'flashCached': True}
+ volume_object.workload_id = "4200000001000000000000000000000000000000"
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ self.assertTrue(volume_object.update_volume_properties())
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"metadata": [{"key": "workloadId", "value": "12345"}]}
+ volume_object.wait_for_volume_availability = lambda: None
+        volume_object.get_volume = lambda: {"id": "12345"}
+ volume_object.get_volume_property_changes = lambda: {}
+ volume_object.workload_id = "4200000001000000000000000000000000000000"
+ self.assertFalse(volume_object.update_volume_properties())
+
+ def test_update_volume_properties_fail(self):
+ """Verify exceptions are thrown."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ volume_object.wait_for_volume_availability = lambda: None
+        volume_object.get_volume = lambda: {"id": "12345"}
+ volume_object.get_volume_property_changes = lambda: {
+ 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
+ 'flashCached': True}
+ volume_object.workload_id = "4200000001000000000000000000000000000000"
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update volume properties."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ self.assertTrue(volume_object.update_volume_properties())
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.pool_detail = {"id": "12345"}
+ volume_object.wait_for_volume_availability = lambda: None
+        volume_object.get_volume = lambda: {"id": "12345"}
+ volume_object.get_volume_property_changes = lambda: {
+ 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
+ 'flashCached': True}
+ volume_object.workload_id = "4200000001000000000000000000000000000000"
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update thin volume properties."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ self.assertTrue(volume_object.update_volume_properties())
+
+ def test_expand_volume_pass(self):
+ """Verify volume expansion."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
+ "expansionSize": 100 * 1024 * 1024 * 1024}
+ volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ volume_object.expand_volume()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
+ "expansionSize": 100 * 1024 * 1024 * 1024}
+ volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ volume_object.expand_volume()
+
+ def test_expand_volume_fail(self):
+ """Verify exceptions are thrown."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
+ "expansionSize": 100 * 1024 * 1024 * 1024}
+ volume_object.volume_detail = {"id": "12345", "thinProvisioned": False}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand volume."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.expand_volume()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
+ volume_object = NetAppESeriesVolume()
+ volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
+ "expansionSize": 100 * 1024 * 1024 * 1024}
+ volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand thin volume."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.expand_volume()
+
+ def test_delete_volume_pass(self):
+ """Verify volume deletion."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"id": "12345"}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ volume_object.delete_volume()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
+ "thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
+ "thin_volume_growth_alert_threshold": 90})
+ volume_object = NetAppESeriesVolume()
+ volume_object.volume_detail = {"id": "12345"}
+ with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
+ volume_object.delete_volume()
+
+ def test_delete_volume_fail(self):
+ """Verify exceptions are thrown."""
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
+ volume_object = NetAppESeriesVolume()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete volume."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.delete_volume()
+
+ self._set_args(
+ {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
+ volume_object = NetAppESeriesVolume()
+ with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete thin volume."):
+ with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
+ volume_object.delete_volume()
diff --git a/ansible_collections/netapp_eseries/santricity/vars/hubPreCheck.groovy b/ansible_collections/netapp_eseries/santricity/vars/hubPreCheck.groovy
new file mode 100644
index 000000000..43f0efefb
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/hubPreCheck.groovy
@@ -0,0 +1,8 @@
+def call(Map optional) {
+ if (optional.docker) {
+ echo "Ensuring that Docker is available on the system."
+ sh """
+ docker --version
+ """
+ }
+}
diff --git a/ansible_collections/netapp_eseries/santricity/vars/hubScan.groovy b/ansible_collections/netapp_eseries/santricity/vars/hubScan.groovy
new file mode 100644
index 000000000..ca99cee47
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/hubScan.groovy
@@ -0,0 +1,13 @@
+def call(Map optional = [:], String projectName, String projectVersion) {
+ optional.projectName = projectName
+ optional.projectVersion = projectVersion
+ call(optional)
+}
+
+def call(Map optional) {
+    // hubScan callers pass the flag 'staging', while hubScanProject expects 'productionScan' (defaulting to false).
+    // An unset 'staging' value casts to false, so the scan targets the production server unless staging is explicitly true.
+ optional.productionScan = !((boolean) optional.staging)
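+    // For example (illustrative values), hubScan('example-project', '1.0.0', staging: true) maps to
+    // productionScan == false, so the results are sent to the staging Black Duck server.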
+
+ hubScanProject(optional)
+}
diff --git a/ansible_collections/netapp_eseries/santricity/vars/hubScanDocker.groovy b/ansible_collections/netapp_eseries/santricity/vars/hubScanDocker.groovy
new file mode 100644
index 000000000..10ced62f6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/hubScanDocker.groovy
@@ -0,0 +1,76 @@
+def call(Map optional, String projectName, String projectVersion, String imageDirectory) {
+ optional.projectName = projectName
+ optional.projectVersion = projectVersion
+ optional.imageDirectory = imageDirectory
+ call(optional)
+}
+
+
+def call(Map optional) {
+
+ String projectVersion = optional.projectVersion
+ String projectName = optional.projectName
+ String imageDirectory = optional.imageDirectory
+ String url = "https://blackduck.eng.netapp.com"
+ String credId = 'hubProductionToken'
+
+ if((boolean) optional.staging){
+ url = "https://blackduck-staging.eng.netapp.com"
+ credId = 'hubStagingToken'
+ }
+
+ BLACKDUCK_SKIP_PHONE_HOME = true
+ withCredentials([string(credentialsId: credId, variable: 'TOKEN')]) {
+ String memory = optional.scannerMemoryMb ?: '8192'
+ String logLevel = optional.logLevel ?: 'INFO'
+ String coreCount = optional.coreCount ?: 1
+ String timeoutMinutes = optional.timeout ?: 60
+
+ sh''' wget -qN http://esgweb.eng.netapp.com/~lorenp/synopsys-detect-6.0.0-air-gap.zip -O /tmp/synopsys-detect.zip
+ unzip -u -d /tmp/tools /tmp/synopsys-detect.zip
+ rm -f /tmp/synopsys-detect.zip
+ '''
+
+ // Create the temporary directory for the scan logs
+ def scanTempDir = sh(returnStdout: true, script: "mktemp --directory \"/tmp/synopsys-detect-${projectName}-${projectVersion}-XXXXXXXXXX\"").trim()
+
+ echo "Initiating Hub Scanning Process on every image in ${imageDirectory}"
+ echo "Sending results to ${url}"
+ echo "Using a logLevel of ${logLevel}"
+ echo "Additional parameters: ${optional}"
+ echo "Running with a timeout value of ${timeoutMinutes} minutes"
+
+    // Locate all of the Docker image tarballs (*.tar) under the image directory to scan.
+ sh "find ${imageDirectory} -type f -iname '*.tar'> listFiles"
+ def files = readFile( "listFiles" ).split('\n');
+ try {
+ files.each {
+ def fileName = it.split('/')[-1];
+ timeout(time: "${timeoutMinutes}", unit: 'MINUTES') {
+ // Run a single scan for each image we find, using the filename as a scan identifier
+ sh """
+ java -Xms4096m -Xmx8192m -Xss1024m -jar /tmp/tools/synopsys-detect-6.0.0.jar \
+ --blackduck.url=${url} \
+ --detect.blackduck.signature.scanner.memory="${memory}" \
+ --detect.blackduck.signature.scanner.individual.file.matching="ALL" \
+ --blackduck.api.token=${TOKEN} \
+ --detect.docker.tar=${it} \
+ --detect.parallel.processors=${coreCount} \
+ --detect.code.location.name=${projectName}-${projectVersion}-${fileName} \
+ --detect.project.name=${projectName} \
+ --detect.project.version.name=${projectVersion} \
+ --detect.cleanup=false \
+ --blackduck.trust.cert=true \
+ --detect.output.path=${scanTempDir} \
+ --logging.level.com.synopsys.integration="${logLevel}"
+
+ """
+ }
+ }
+ } finally {
+ dir("${scanTempDir}") {
+ deleteDir()
+ }
+ }
+ }
+}
diff --git a/ansible_collections/netapp_eseries/santricity/vars/hubScanProject.groovy b/ansible_collections/netapp_eseries/santricity/vars/hubScanProject.groovy
new file mode 100644
index 000000000..b980d7da7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/hubScanProject.groovy
@@ -0,0 +1,123 @@
+/**
+ * Initiate a Synopsys Detect scan. By default the working directory ('./') is scanned and all detectors are enabled.
+ * Java MUST be installed for this to succeed, and scanning inside a Docker container is recommended because the
+ * detector may build the project automatically.
+ *
+ * The 'optional' map supports these fields:
+ *  - clearPriorScans: false. Unmap (but do not delete) previous scans for the associated project and version on the server.
+ *  - coreCount: -1. Number of parallel scanner processors; -1 uses the number of cores on the system.
+ *  - disableDetector: false. Disable the Synopsys detector; the detector SHOULD be run, but it can cause build issues
+ *        and so may be disabled.
+ *  - logLevel: INFO. Logging level for Synopsys Detect.
+ *  - productionScan: false. Set this to true to send scan results to the production Black Duck server; staging is used by default.
+ *  - scanOpts: [:]. A map of additional hub command-line arguments, or overrides, depending on project needs. For example,
+ *        users can control the detector search depth with optional.scanOpts["--detect.detector.search.depth"] = "0".
+ *  - scannerMemoryMB: 1024. Memory, in MB, allocated to the signature scanner.
+ *  - timeout: 60. Maximum scan time, in minutes, before failing the build.
+ *
+ * Important implementation notes:
+ *  - Java must be installed and in the path.
+ *  - A temporary directory, scanTempDir, is created at '/tmp/synopsys-detect-<projectName>-<projectVersion>-XXXXXXXX'.
+ *        This temporary directory is DELETED after the scan to avoid excessive storage usage.
+ *  - The Synopsys Detect air-gap bundle (600MB+ zip, 1.5GB+ extracted) is unpacked at '$scanTempDir/synopsys-detect-air-gap/<synopVersion>'.
+ *        This path is deleted along with the temp dir after the scan.
+ *  - The files in $scanTempDir/runs/** are archived.
+ * - URLs
+ * - https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/622673/Synopsys+Detect+Properties
+ * - https://synopsys.atlassian.net/wiki/spaces/INTDOCS/pages/62423113/Synopsys+Detect
+ *
+ * @param optional map of optional arguments
+ * @param projectName the name of the project
+ * @param projectVersion the version of the project
+ */
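+// A minimal usage sketch (project name, version, and scanOpts values below are illustrative, not part of this library):
+//
+//     hubScanProject("example-project", "1.0.0",
+//                    productionScan: false,
+//                    scanOpts: ["--detect.detector.search.depth": "0"])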
+def call(Map optional = [:], String projectName, String projectVersion) {
+ optional.projectName = projectName
+ optional.projectVersion = projectVersion
+ optional.scanOpts = (Map) optional.scanOpts ?: [:]
+ call(optional)
+}
+
+def call(Map optional) {
+ String projectVersion = optional.projectVersion
+ String projectName = optional.projectName
+ String synopsysDetectVersion = optional.synopsysDetectVersion ?: "6.3.0"
+ BLACKDUCK_SKIP_PHONE_HOME = true
+
+ String url = "https://blackduck-staging.eng.netapp.com"
+ String credId = 'hubStagingToken'
+
+ // Use the production server if productionScan is explicitly set to true
+ if (new Boolean(optional.productionScan)) {
+ url = "https://blackduck.eng.netapp.com"
+ credId = 'hubProductionToken'
+ }
+
+ withCredentials([string(credentialsId: credId, variable: 'TOKEN')]) {
+ String timeoutMinutes = optional.timeout ?: 60
+
+ // Create the temporary directory for the scan logs and the extracted hub-detect zip
+ def scanTempDir = sh(returnStdout: true, script: "mktemp --directory \"/tmp/synopsys-detect-${projectName}-${projectVersion}-XXXXXXXXXX\"").trim()
+ def synopsysDir = "${scanTempDir}/synopsys-detect-air-gap/${synopsysDetectVersion}"
+ setupSynopsysDetect(synopsysDetectVersion, synopsysDir: synopsysDir)
+
+ echo "Using temporary directory ${scanTempDir}"
+ echo "Sending results to ${url}"
+ echo "Additional parameters: ${optional}"
+ echo "Using timeout of ${timeoutMinutes} minutes"
+
+ Map m = [:]
+ m["--blackduck.trust.cert"] = "true"
+ m["--blackduck.url"] = url
+ m["--blackduck.api.token"] = TOKEN
+ m["--detect.project.name"] = projectName
+ m["--detect.project.version.name"] = projectVersion
+ m["--detect.code.location.name"] = "${projectName}-${projectVersion}"
+ m["--detect.project.codelocation.unmap"] = optional.clearPriorScans ?: "false"
+ m["--detect.blackduck.signature.scanner.memory"] = optional.scannerMemoryMB ?: "1024"
+ m["--detect.parallel.processors"] = optional.coreCount ?: -1
+ m["--detect.cleanup"] = "false"
+ m["--detect.blackduck.signature.scanner.paths"] = optional.scanDir ?: './'
+ m["--detect.output.path"] = scanTempDir
+ m["--logging.level.com.synopsys.integration"] = optional.logLevel ?: "INFO"
+ m["--detect.detector.search.depth"] = "3"
+ m["--detect.sbt.report.depth"] = "3"
+ m["--detect.blackduck.signature.scanner.exclusion.name.patterns"] = "node_modules,.git,.gradle"
+ m["--detect.blackduck.signature.scanner.exclusion.pattern.search.depth"] = "30"
+ m["--detect.docker.inspector.air.gap.path"] = "${synopsysDir}/packaged-inspectors/docker"
+ m["--detect.nuget.inspector.air.gap.path"] = "${synopsysDir}/packaged-inspectors/nuget"
+ m["--detect.gradle.inspector.air.gap.path"] = "${synopsysDir}/packaged-inspectors/gradle"
+ m["--detect.blackduck.signature.scanner.individual.file.matching"] = "ALL"
+
+ if (optional.cloneVersion) {
+ m["--detect.clone.project.version.name"] = optional.cloneVersion
+ }
+ if ((boolean) optional.disableDetector) {
+ m["--detect.tools.excluded"] = "DETECTOR"
+ }
+
+ m.putAll((Map) optional.scanOpts)
+
+ synopsysArgs = m.collectEntries { k, v -> ["$k=$v"] }.keySet().join(" \\\n ")
+ synopsysExec = "java -Xms1024m -Xmx2048m -jar ${synopsysDir}/synopsys-detect-${synopsysDetectVersion}.jar ${synopsysArgs}"
+ echo "The blackduck scan execute command: \n'${synopsysExec}'"
+
+ try {
+ timeout(time: "${timeoutMinutes}", unit: 'MINUTES') {
+ sh """
+ ${synopsysExec}
+ # Delete any existing docker extractions from this scan to avoid excessive storage use.
+ rm -rf ${scanTempDir}/runs/*/extractions || true
+ mv ${scanTempDir}/runs synopsysRuns
+ """
+
+ // NOTE: Archiving works **ONLY** in the build workspace. All artifacts must be copied to the workspace.
+ // Ignore gz to avoid archiving docker images.
+ archiveArtifacts artifacts: "synopsysRuns/**", excludes: "**/*.gz"
+ }
+ } finally {
+ dir("${scanTempDir}") {
+ deleteDir()
+ }
+ }
+ }
+}
diff --git a/ansible_collections/netapp_eseries/santricity/vars/setupBlackduckBuildParameters.groovy b/ansible_collections/netapp_eseries/santricity/vars/setupBlackduckBuildParameters.groovy
new file mode 100644
index 000000000..c2e15a089
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/setupBlackduckBuildParameters.groovy
@@ -0,0 +1,16 @@
+def call(Map options = [:]) {
+ String buildArtifactKeepNum = options.buildArtifactKeepNum ?: '15'
+ String buildKeepNum = options.buildKeepNum ?: '30'
+    // The default cron schedule runs one build at a hashed time between 1:xx pm and 4:xx pm on Mondays.
+ String buildCronSchedule = options.buildCronSchedule ?: 'H H(13-16) * * 1'
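+    // For example (illustrative values): setupBlackduckBuildParameters(buildKeepNum: '10', buildCronSchedule: 'H H(1-4) * * 1')
+    // would keep 10 builds and move the weekly scan to early Monday morning instead.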
+
+ properties([
+ parameters([
+ choice(name: 'logLevel', choices: ['WARN', 'INFO', 'DEBUG', 'TRACE'], description: 'Set the logging level. WARN is the default.')
+ ]),
+ buildDiscarder(
+ logRotator(artifactNumToKeepStr: buildArtifactKeepNum, numToKeepStr: buildKeepNum)
+ ),
+ pipelineTriggers([cron(buildCronSchedule)])
+ ])
+}
diff --git a/ansible_collections/netapp_eseries/santricity/vars/setupBuildParameters.groovy b/ansible_collections/netapp_eseries/santricity/vars/setupBuildParameters.groovy
new file mode 100644
index 000000000..8e0495757
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/setupBuildParameters.groovy
@@ -0,0 +1,3 @@
+def call(Map options = [:]) {
+ setupBlackduckBuildParameters(options)
+}
diff --git a/ansible_collections/netapp_eseries/santricity/vars/setupSynopsysDetect.groovy b/ansible_collections/netapp_eseries/santricity/vars/setupSynopsysDetect.groovy
new file mode 100644
index 000000000..f5eed5c4c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/vars/setupSynopsysDetect.groovy
@@ -0,0 +1,15 @@
+
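+// Downloads and unpacks the Synopsys Detect air-gap bundle unless it is already present at options.synopsysDir.
+// A minimal usage sketch (version and directory are illustrative): setupSynopsysDetect('6.3.0', synopsysDir: '/tmp/synopsys-detect-air-gap/6.3.0')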
+def call(Map options = [:], String synopsysDetectVersion) {
+ options.synopsysDir = options.synopsysDir ?: "/tmp/synopsys-detect-air-gap/${synopsysDetectVersion}"
+ if (new File(options.synopsysDir).exists()) {
+ echo "No need to fetch synopsys-${synopsysDetectVersion}, directory exists ${options.synopsysDir}"
+ return
+ }
+
+ sh """
+ wget -qN http://esgweb.eng.netapp.com/~blucas/packages/synopsys-detect-${synopsysDetectVersion}-air-gap.zip -O synopsys-detect.zip
+ mkdir -p ${options.synopsysDir}
+ unzip -q -d ${options.synopsysDir} -u synopsys-detect.zip
+ rm -f synopsys-detect.zip
+ """
+}