summaryrefslogtreecommitdiffstats
path: root/ansible_collections/infinidat/infinibox
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/infinidat/infinibox')
-rw-r--r--ansible_collections/infinidat/infinibox/.gitignore12
-rw-r--r--ansible_collections/infinidat/infinibox/.gitlab-ci.yml6
-rw-r--r--ansible_collections/infinidat/infinibox/CHANGELOG.rst38
-rw-r--r--ansible_collections/infinidat/infinibox/FILES.json726
-rw-r--r--ansible_collections/infinidat/infinibox/MANIFEST.json10
-rw-r--r--ansible_collections/infinidat/infinibox/Makefile333
-rw-r--r--ansible_collections/infinidat/infinibox/Makefile-help82
-rw-r--r--ansible_collections/infinidat/infinibox/README-DEV.md21
-rw-r--r--ansible_collections/infinidat/infinibox/README.md25
-rw-r--r--ansible_collections/infinidat/infinibox/ibox_vars/iboxCICD.yaml19
-rw-r--r--ansible_collections/infinidat/infinibox/ibox_vars/iboxNNNN_example.yaml2
-rw-r--r--ansible_collections/infinidat/infinibox/ibox_vars/vibox.yaml134
-rw-r--r--ansible_collections/infinidat/infinibox/meta/runtime.yml2
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/configure_array.yml388
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml35
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml17
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml35
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/inventory14
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_config_sample.yml63
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml101
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_metadata.yml813
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml21
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml399
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml47
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_create_volumes.yml93
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_notification_rules_sample.yml111
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml65
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_metadata.yml80
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml14
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml118
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml17
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_users_repository.yml80
-rw-r--r--ansible_collections/infinidat/infinibox/playbooks/test_remove_volumes.yml34
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py2
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py3
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py228
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py199
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py163
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py238
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py141
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py78
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py44
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py180
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py517
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py62
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py269
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py674
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py272
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py360
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py361
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py47
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py51
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py299
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py431
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py534
-rw-r--r--ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py310
-rw-r--r--ansible_collections/infinidat/infinibox/requirements-dev.txt1
-rw-r--r--ansible_collections/infinidat/infinibox/scripts/syslog.log437
-rwxr-xr-xansible_collections/infinidat/infinibox/scripts/syslog_server.py52
-rw-r--r--ansible_collections/infinidat/infinibox/tests/config.yml2
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_absent.json8
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_present.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_stat.json8
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_present.json15
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_event_present.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_rename.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_absent.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_present.json13
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_stat.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_host_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_host_present.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_host_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_map_absent.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_map_present.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_map_stat.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_absent.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_present.json12
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_stat.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_present.json26
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_present.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_present.json15
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_present.json15
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_present.json16
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_login.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_present.json16
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_login.json10
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_present.json13
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_user_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_absent.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_present.json19
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_absent.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_present.json20
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_present.json19
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_stat.json9
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_absent.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_present.json13
-rw-r--r--ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_stat.json11
-rw-r--r--ansible_collections/infinidat/infinibox/tests/sanity/ignore-2.10.txt0
117 files changed, 8711 insertions, 1818 deletions
diff --git a/ansible_collections/infinidat/infinibox/.gitignore b/ansible_collections/infinidat/infinibox/.gitignore
index bd0ad5f4f..dc3c3854f 100644
--- a/ansible_collections/infinidat/infinibox/.gitignore
+++ b/ansible_collections/infinidat/infinibox/.gitignore
@@ -1,4 +1,5 @@
vault_password.txt
+Makefile-vars
# build products...
*.py[co]
build
@@ -73,6 +74,7 @@ coverage.xml
# Development
tests/hacking
tests/sanity
+tests/output
/test/develop
venv
Vagrantfile
@@ -110,3 +112,13 @@ changelogs/.plugin-cache.yaml
# secrets
ibox_vars/*.yaml
collections/
+
+# SSL
+ca.csr
+infinidat/
+private-key.pem
+scripts/create-vols.sh
+signed-certificate-no-pkey.pem
+signed-certificate-with-pkey.pem
+
+infinidat/
diff --git a/ansible_collections/infinidat/infinibox/.gitlab-ci.yml b/ansible_collections/infinidat/infinibox/.gitlab-ci.yml
index 013f6976b..ff749c98a 100644
--- a/ansible_collections/infinidat/infinibox/.gitlab-ci.yml
+++ b/ansible_collections/infinidat/infinibox/.gitlab-ci.yml
@@ -5,7 +5,7 @@
# - VAULT_PASSWORD_FILE
# - Type: File
# - Key: VAULT_PASSWORD_FILE
-# - Value: <ansible vault password for the vars file for the ibox specified in --extra-vars>
+# - Value: <ansible vault password for the vars file for the ibox specified in --extra-vars>
# - i.e. The password that allows one to view the file using "ansible-vault view <file>"
image: psusdev/gitlab-cicd:v0.14
@@ -52,7 +52,7 @@ playbook_testing:
- popd > /dev/null
# Show summery of execution tasks
- - ./bin/test_summarize.sh general
+ - ./scripts/test_summarize.sh general
# Run testing map cluster playbooks
- git_project="ansible-infinidat-collection"
@@ -67,4 +67,4 @@ playbook_testing:
- popd > /dev/null
# Show summery of execution tasks
- - ./bin/test_summarize.sh map-cluster
+ - ./scripts/test_summarize.sh map-cluster
diff --git a/ansible_collections/infinidat/infinibox/CHANGELOG.rst b/ansible_collections/infinidat/infinibox/CHANGELOG.rst
index 482aa5b52..b267befe0 100644
--- a/ansible_collections/infinidat/infinibox/CHANGELOG.rst
+++ b/ansible_collections/infinidat/infinibox/CHANGELOG.rst
@@ -3,6 +3,44 @@ Change Log
==========
-------------------
+v1.4.1 (2024-02-06)
+-------------------
+
+^^^^^^^^^^^^^^^^^^^^
+Feature Enhancements
+^^^^^^^^^^^^^^^^^^^^
+* Require Ansible >= 2.14.0
+* psdev-1178: Add infini_infinimetrics moduile. Allows add an Infinibox to Infinimetrics.
+
+-------------------
+v1.4.0 (2024-02-05)
+-------------------
+
+^^^^^^^^^
+Bug Fixes
+^^^^^^^^^
+* The default for the write_protected parameter when creating a master volume or master file system has changed from true to false. For snapshots, the default is true.
+* psdev-1147: Fix an issue network space module where when removing a space the management interface was not removed last. This is required.
+
+^^^^^^^^^^^^^^^^^^^^
+Feature Enhancements
+^^^^^^^^^^^^^^^^^^^^
+* psdev-1138: Add infini_sso module. Allow SSO configuration.
+* psdev-1151: Add infini_fibre_channel_switch module. Allow renaming of FC switches.
+* psdev-1148: Add infini_certificate module. Allow uploading SSL certificates.
+* psdev-1045: Add infini_event module. Allow posting of custom events.
+* Add infini_config module.
+* Add infini_notification_rule module.
+* Add infini_notification_target module.
+* psdev-1108: Provide configure_array.yml playbook. This is an example playbook demonstrating detailed configuration of Infiniboxes. It is idempotent so may be run against new or existing Infiniboxes repeatedly.
+* psdev-1147: Implement network space module present state to handle updating parameters in an existing network space. Add support for is_async option.
+* psdev-1108: Add state "login" to infini_user module. This tests credentials. Added to support Active Directory testing.
+* Add syslog_server script to allow testing of syslog notifications.
+* Add new infini_users_repository module. Use this module to configure Active Directory and LDAP resournces on an Infinibox.
+* Add new infini_metadata module. This module will set, get and remove metadata (keys and values) to and from objects of these types: ["cluster", "fs", "fs-snap", "host", "pool", "system", "vol", "vol-snap"].
+* Add snapshot support to the infini_fs module. File system snapshot locks, regular and immutable are supported.
+
+-------------------
v1.3.12 (2022-12-04)
-------------------
diff --git a/ansible_collections/infinidat/infinibox/FILES.json b/ansible_collections/infinidat/infinibox/FILES.json
index 5f99555df..e5a36d2bf 100644
--- a/ansible_collections/infinidat/infinibox/FILES.json
+++ b/ansible_collections/infinidat/infinibox/FILES.json
@@ -22,38 +22,31 @@
"format": 1
},
{
- "name": "tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/sanity",
+ "name": "ibox_vars",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/sanity/ignore-2.10.txt",
+ "name": "ibox_vars/iboxNNNN_example.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "888179c43aa1427102ee53ba7d2a1439acd6e4ae3d9c05ce1f71fa373b315af8",
"format": 1
},
{
- "name": "tests/hacking",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "ibox_vars/iboxCICD.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "558e286155b4ecf459ded4d0187b317f69201750e6ec1e09abbb7b247e3f9ad3",
"format": 1
},
{
- "name": "tests/hacking/infini_vol_present_example.json",
+ "name": "ibox_vars/vibox.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "23e4089f3d750800a7c782a3d579a53f298d366f0b1f84431a02414c12b81160",
+ "chksum_sha256": "82cebc7345d44c418a05b7309bc09f69e3e0192da55a19b7ba87f0d7b14be5d0",
"format": 1
},
{
@@ -74,178 +67,220 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a64562102c20fa61d7e538a3868d082f7aa20e32bc3bbccfba7e1dda5da45947",
+ "chksum_sha256": "2af7f216ad53afd39bb0c11d4db64e18bc31056b50827d6791e9631978ac9048",
"format": 1
},
{
- "name": "bin",
+ "name": "playbooks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "Makefile",
+ "name": "playbooks/ansible.cfg",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "50951adfabb9adde022b27b189197b0b2064f35c330d4fa24889c616932404f0",
+ "chksum_sha256": "6946120d2926bdde34018f3507e119c2e5f7d08e74ab25fe42e9ab61c7d07e62",
"format": 1
},
{
- "name": "README.md",
+ "name": "playbooks/inventory",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "53fccfe2fb4c05ec005e5fcd3ed0f539cd8ba8258a972e1d082cd3f23d4f463d",
+ "chksum_sha256": "f164a40b003a7fde993412caeff0d770199a1c3ee0cdc6c68735812c192766de",
"format": 1
},
{
- "name": ".gitlab-ci.yml",
+ "name": "playbooks/test_remove_volumes.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "55ffc971ac57019980f0d6b9748ba0630548afe86d548e950cd952cfd67fd2cf",
+ "chksum_sha256": "728a42b2e20165389e133398db73e4d648390a63f2cbce79d75a94ecdb4fc25a",
"format": 1
},
{
- "name": "ibox_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "playbooks/configure_array.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd8225826ce2fcc0f59aeadef0136a962cd8c89fe40ac4001ffaa2d27864a01d",
"format": 1
},
{
- "name": "ibox_vars/iboxCICD.yaml",
+ "name": "playbooks/test_create_volumes.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c5e5ea378ed20e66e96c3fafee6c53845ed534c27804ee82ccdeca7edb23d8e5",
+ "chksum_sha256": "7e1da86de6b914946140f28ffc7235418ca4817ac6a0f40a601b4d81d8bf53bc",
"format": 1
},
{
- "name": "ibox_vars/iboxNNNN_example.yaml",
+ "name": "playbooks/test_create_resources.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "97812c7706f088a4b0f926a7fee966c20ed788fa5ec42814e4f94d2ba66404f8",
+ "chksum_sha256": "2cd6cfcab9da33e723a25aecf9a0075fb4afac8b28d388344894f5aa1b60429b",
"format": 1
},
{
- "name": "Makefile-help",
+ "name": "playbooks/test_remove_metadata.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1c1bf806cc1b36461f41ce8993811ee8c4defa0d39c8fbf471236722482e396e",
+ "chksum_sha256": "893dffb4ea899a0fec1c3ebbe5a08c4f6d7ddfb78002ef79870a9eb219ea4a39",
"format": 1
},
{
- "name": "playbooks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "playbooks/test_remove_users_repository.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c98c2caa0f8720d5d08ac7362e1794dc2be3191b2e15d421183b1cc3bd77afb",
"format": 1
},
{
"name": "playbooks/test_remove_network_spaces.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "567cc99c829f63ac287e80550884a4698f9c80210eece857150203d8320b57fe",
+ "chksum_sha256": "93c179b4918a5aa94e23f83206f9cf891851e10beb212b05b715a31efab90f8d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/test_remove_resources.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edabc5214a7c5f092da78c73760abe87d48c3079c6d77f5b57ef4c33feec4bb4",
"format": 1
},
{
"name": "playbooks/infinisafe_demo_runtest.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d9b28566237125f820184a36c653281312717e5747c97d95ccf3fc13054739a1",
+ "chksum_sha256": "e082c88681f4080a3be10a9e3a30dc9bfd4f3f303b5eec573a4e2da6268561a7",
"format": 1
},
{
- "name": "playbooks/test_create_snapshots.yml",
+ "name": "playbooks/infinisafe_demo_teardown.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a1a29f53a43d35dc13e144bd85ac69eec0513621d869c24c9765f1491b1e2e77",
+ "chksum_sha256": "8be2494710a5ea4fd7735cfe0ef52d8e9fd721a5a8d7a7e660f10040a88306c2",
"format": 1
},
{
- "name": "playbooks/test_remove_snapshots.yml",
+ "name": "playbooks/test_config_sample.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c1847ecb16b3a825ab494a8e992323c63940e9c0f6b9fb705a17e90393b2645d",
+ "chksum_sha256": "313404d41862db332f3a71a1c8092f25c581890cba992b131a7fe0a79a21b138",
"format": 1
},
{
- "name": "playbooks/infinisafe_demo_setup.yml",
+ "name": "playbooks/test_remove_map_cluster.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "08e1b350fed21d7dc483e9f5993bb067ee81ac7a4b3385eac75e9e7ae0149ccb",
+ "chksum_sha256": "a3634e90f75e0bd7b430298fc03451754d13944d68204a98682879d2b78f5ca3",
"format": 1
},
{
- "name": "playbooks/test_create_network_spaces.yml",
+ "name": "playbooks/test_remove_snapshots.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a31ec0f9cd1da16136398b3f860b8ca008945e679475b712a3cbfd09ccdaffe5",
+ "chksum_sha256": "173991de0060fe4ff62aed19e9eb638a640e4f52032e68534b69ad0cefbd0270",
"format": 1
},
{
"name": "playbooks/test_create_map_cluster.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "87c95887ad3422b6edf744f462a68437c27315f39539e13af7a8186e8631050d",
+ "chksum_sha256": "09ccf475f6692eb69732ed4f211e7f2ad9a99e54fe138ccaf8ceabff5ca88ac2",
"format": 1
},
{
- "name": "playbooks/inventory",
+ "name": "playbooks/test_create_network_spaces.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "93d1758720c0534d6c7902f91dd6dc14b1e429b24f330f5903267910d43c50b4",
+ "chksum_sha256": "1535c4029ab50a492deb5ccc5844f8db925943edea36dd8596928831fafb8141",
"format": 1
},
{
- "name": "playbooks/test_create_resources.yml",
+ "name": "playbooks/test_create_snapshots.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4f02dc52aa3ac47820454ff2785e913d5d12bab13f7402111a8b2985d8ea09b3",
+ "chksum_sha256": "8a3eecfb7b5badf77d674766b8366fe8846b9a11531e63044de07858a93cf64b",
"format": 1
},
{
- "name": "playbooks/infinisafe_demo_teardown.yml",
+ "name": "playbooks/infinisafe_demo_setup.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f4692d6ddc56c3dbc867e8050a7d5c3faffc7be87bcf22ef4c1476deca9e1a5e",
+ "chksum_sha256": "3d4a1ac66d47a7ff4a27c28da52d5eb325fa094c9906e7464fbd24169580c970",
"format": 1
},
{
- "name": "playbooks/ansible.cfg",
+ "name": "playbooks/test_create_metadata.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6946120d2926bdde34018f3507e119c2e5f7d08e74ab25fe42e9ab61c7d07e62",
+ "chksum_sha256": "1059126c26f8e359c5532524202b52d19c33bc09d2f263b2630f710915a1eb86",
"format": 1
},
{
- "name": "playbooks/test_remove_map_cluster.yml",
+ "name": "playbooks/test_notification_rules_sample.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "576591f3fd874080e7b233231204cf1fbfb89776d1ae0fc645b74c69f686cdf8",
+ "chksum_sha256": "3dc6ba9c5ca2870b1f6be35225c251d30c6cf748576bf5d01f03078c1f2481c1",
"format": 1
},
{
- "name": "playbooks/test_remove_resources.yml",
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/infinibox.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9ac5c717f23ddd5a6553ef4d3fb9a6f89b78bd6977dcc120842d3315f0ec2018",
+ "chksum_sha256": "b48bc46ce2bbd9c455d06d6ce8b00352a3957c7b09e848f95821bcbfa1915b92",
"format": 1
},
{
- "name": "CHANGELOG.rst",
+ "name": "plugins/filter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/delta_time.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "305df25a3023d0c38d95beea1b98d819a86aabda96be05b08d3b23dbf0530d6d",
+ "chksum_sha256": "8d8fd570aa879ec7e5436d9981eb561e8fea5b30d12def6767043b0a25909506",
"format": 1
},
{
- "name": "plugins",
+ "name": "plugins/filter/psus_filters.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1320aad99ff8d66af7181faf2c57682bf281a717616e645e018c558b709a8c06",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
+ "name": "plugins/module_utils/infinibox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31e2e3d9c4bc5d1d6f9e9f86818f2480abe4154315fdd27c7851486681de8214",
+ "format": 1
+ },
+ {
"name": "plugins/modules",
"ftype": "dir",
"chksum_type": null,
@@ -253,171 +288,654 @@
"format": 1
},
{
- "name": "plugins/modules/infini_fs.py",
+ "name": "plugins/modules/__init__.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d6dd884674f25dcafaf90b8de0e68db49afd8bff874b74d014988382b508c3d",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "plugins/modules/infini_port.py",
+ "name": "plugins/modules/infini_fibre_channel_switch.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "464597d73393c954f79b89aeb5cd43724f780b5dcb1d7925808ace7ed9d1ade7",
+ "chksum_sha256": "3d651158ae13d2f0f6a93fcb3c0af5aa283112dc630f45d8c28fcd8dbb04f331",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f1fd399da6508c04d316abb3ef68528b886e24079eaaa8d17adb64e9478f13",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08f51eb7d0e643002fc713857d70b8ed2942f94d87d91d4e953171c0b62ca358",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_certificate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61d0e4c27e20f4d243dafd43b527184ea3e502d30b94d6026e60f514c038f1a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "508e07f78215cee1eb0c01f12a8ef17898220bee30973d3a683443e239d66915",
"format": 1
},
{
"name": "plugins/modules/infini_map.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a709dd42075d2b37cef00dff8a3fdbc3107fd6da5847fd69bc8cebb36445412f",
+ "chksum_sha256": "f965ef038395328bcf0d83309c2801010773daa43a45c45700b3eac31c14e584",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_export_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f82c0187d8be212f2da1590a1f1c5ca3ff0f508d6c7338405b618a86d1770a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_users_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdd2b320b2da411ca81e79f94976856c94bc8e25cce439808119d72ef0764547",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_metadata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cbd810fab73d203955f91bc2e8cb690d9df7382c4d5db3a0a374ea39564b7aa",
"format": 1
},
{
"name": "plugins/modules/infini_network_space.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4f6a141ab5e8a9bd89d90d9d90dba7e1b6a491f9b427107f0052e3a6a3972001",
+ "chksum_sha256": "ae0362e206645d3ab06a9f5fc6b3769fd70f1004148feaf4ec0eba581c94a581",
"format": 1
},
{
"name": "plugins/modules/infini_vol.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9a92b8184d6cdbc4d6440a517bb51a288a90b54c392a4ce2ef4d5d4a4a4a9057",
+ "chksum_sha256": "76e218365ec9b8dce9f4a0e7aba6eb1c9e27f15f36ef308f5044f714c3a19d0f",
"format": 1
},
{
- "name": "plugins/modules/infini_export_client.py",
+ "name": "plugins/modules/infini_notification_rule.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dfc2c3af83c2356754e239d4bce24cb509dcf057864cf15f6083fe8b9ca070b9",
+ "chksum_sha256": "28ff855e3ffd038af35d92030c87461bcaff64f3e3c561a868f37800ecba9c33",
"format": 1
},
{
- "name": "plugins/modules/infini_pool.py",
+ "name": "plugins/modules/infini_fs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b107fd822d1a2347e3f1f8ef38445f4dda191661accb989254d807bf476e9e16",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_export.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c207376a0995ba10858877af5076565dda8f9d5430a53222f178bf58a8dfb6c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infini_notification_target.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e24f22b08d7778dd9394cfc53afa453c97dfd5bd555451c137a0083b666c85b",
+ "chksum_sha256": "d545f6a59abdcf7947f9a07b437bd1d4cecd19f4563e42348c2ed6a4dd2e4d68",
"format": 1
},
{
"name": "plugins/modules/infini_cluster.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2ca78b1585c6395834647ced906c49ea160922de7659e993b3bf2a67fa590728",
+ "chksum_sha256": "cff08f680199863e095fae1c890cc17b65e15d263c1b84da859c170bb6c8cb82",
"format": 1
},
{
"name": "plugins/modules/infini_host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8fe8d1ceff823d19bd703fe9690fd3c28ffd3c118ad79d2a57e1b00d9c1ed294",
+ "chksum_sha256": "96453b44cad54dcd36760356577b03cf63b436543612d36fe2340cd148044143",
"format": 1
},
{
- "name": "plugins/modules/infini_export.py",
+ "name": "plugins/modules/infini_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e476bdf871ee10af8aac8909fdcffb4c10739483e94268ad5906039a9dc118c4",
+ "chksum_sha256": "09d81af6094917024aa51c39d9ae88367afa337f1944b12e9e0208369924833e",
"format": 1
},
{
- "name": "plugins/modules/infini_user.py",
+ "name": "plugins/modules/infini_port.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "10bb0e644545ad216fff1db8cd9952d65941dcfdfcd6da3536513434baa7351b",
+ "chksum_sha256": "09ed2ba4508b91df5c8a15d694463e8ea87615acd21e362ae1219b6e4bc1492f",
"format": 1
},
{
- "name": "plugins/modules/__init__.py",
+ "name": "plugins/modules/infini_sso.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "chksum_sha256": "cbb5879bb0874d71dbf648f13c5a11c5767356cddd29c1239898b38141f4a3b4",
"format": 1
},
{
- "name": "plugins/filter",
+ "name": "tests",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/filter/delta_time.yml",
+ "name": "tests/hacking",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_certificate_present.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d8fd570aa879ec7e5436d9981eb561e8fea5b30d12def6767043b0a25909506",
+ "chksum_sha256": "6ca7947e40e561c010b57937c37459d7cec19ac3dca1341417654c43458b7fb5",
"format": 1
},
{
- "name": "plugins/filter/psus_filters.py",
+ "name": "tests/hacking/infini_user_stat.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0f88020170134ef6e22aeeeb2c72eeb36427817027b2fd7a6143ea76355690a4",
+ "chksum_sha256": "2ceb51d08936f7fcf907a5927d181a5c74b83d9951519450c7a2c71d601c2399",
"format": 1
},
{
- "name": "plugins/doc_fragments",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
+ "name": "tests/hacking/infini_user_simple_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a8ed6a296c3fc51afd5b174814860e1ecc417565d2e2fcad4e7172f892a631f",
"format": 1
},
{
- "name": "plugins/doc_fragments/infinibox.py",
+ "name": "tests/hacking/infini_fibre_channel_switch_stat.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e25f04ed34bd1678dd7e22073f44c5dab1e063a0f588b7160e3abcfa2489629",
+ "chksum_sha256": "1793740680f859d1d9b3843022ad3c4dd3edcbcea4e1735f6d3cbdc042c650f7",
"format": 1
},
{
- "name": "plugins/module_utils",
+ "name": "tests/hacking/infini_vol_present_example.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23e4089f3d750800a7c782a3d579a53f298d366f0b1f84431a02414c12b81160",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_ldap_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ceb51d08936f7fcf907a5927d181a5c74b83d9951519450c7a2c71d601c2399",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_network_space_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69c3dcd9db6b1ec9f1519305d574843089ec93635a4a36ec89917bd480d19c1e",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_network_space_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a457a12bbbec4de0ca11181ae1d40b8af61e17007bf745d84f99712bf5eefd74",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_ldap_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a5e47488388b4adf1131b3ac292e085fb368cc43e20c944ab715079335d3efd",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_fs_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6bfea20cd61cc73e6f30469b286de817f66b56c85ce3bf9d35b830cffff501d",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_fs_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8dc3f612f49ba283f3d851f34ee8440bf0bcdb06d198957c9da2c6e408a0b55d",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_vol_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77b56e87d0997981a21bf05f1c181b95d6e0d2bf2dca2b159ab9b40ddbd05206",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_vol_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59b98186a2797ff06920187321f9ab763eb8d012de1478778073b058eb1337ac",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_vol_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "382fa9517b73f3f92beae3273ab5764b0d5ed29315d021beacc5e1757078558b",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_ldap_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "548edd33acba531eb99b36accdd4018760246db0c7f40f835cf7680ca29b8345",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_metadata_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ffc6ed89bc4d7bce3880715c7e0383aac4f34a03ae8965b998cdf29962f9c54",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_metadata_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdb155ceea0d9749eda439aae9093cc253e97827810e7b8be65dfed8af7792f0",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75bcbe2bb4b9940f8fd433f7e114dbeff8a955622b3a12294a0dfeae7c875136",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66310bafeceec53713f418fee6c5f332218158f248f4b73b9ddcdd2383b0356a",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95e6ca6225aa07f500acab0657d29642a047e3406690c25dcf2781e469e6b193",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_ad_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95e6ca6225aa07f500acab0657d29642a047e3406690c25dcf2781e469e6b193",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_ad_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75bcbe2bb4b9940f8fd433f7e114dbeff8a955622b3a12294a0dfeae7c875136",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_ad_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66310bafeceec53713f418fee6c5f332218158f248f4b73b9ddcdd2383b0356a",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_ldap_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "900827e9dcb11b09f2cf0d66a35c91008da5bbef33214b13dce7b7997841dbbe",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_users_repository_ldap_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38a0658d1f2c2cbfccc3d10cf1b5acd2a68b8f1748caf0b30a6e0a93dbc51ea9",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_metadata_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88468dff8d22068c313abab95c8d298953f516deb0b8765a3c77a1926d373ab7",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_certificate_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ee672ec799ff626fc87b03894c6357b8dd9f0198dd07f55b1a3de3f5cc8d3c0",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_ldap_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "694b3e8b89b598ae6fcc844de9c0fbb1137f22760025239aa81bdc17362a7898",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_notification_rule_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "991bf9066974b9e425ff08e51b87d5a3aa943cd7fc8d0523a3b1d59a85f3f0e0",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_notification_rule_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6f1d3b769dd1e6cfc0458c9d81b2637d3b71503913f5ab9ba42fa87311f7150",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_notification_rule_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82cebfaa87803000c85ffc84f6134fa88ec952c4fb374c33b0a9d83d9c40137f",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_notification_target_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1285e2c98d6cd9cf3dec8442fd0df6d11fec090fe9381323f863d17a4eb1234",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_notification_target_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9916f7e8cbd582191a2296d96daa982739fa261a49ba6d172832f4d3c8ae3dda",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_notification_target_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb79c89370e548d2661962c788ae7d78482716e68e103102867f68da94344e2e",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_simple_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eff3c5e345ed1099bee586c6fab5c64b51eec1849f6533943c7a3b4236c6813",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_sso_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10cc3063ad3a502f739c645c39a6275fc4b7261ed0ee7d955abd016446ae8470",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_simple_login.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cda0cfe5b280ed0b4d3eee7a8a6bb855e89d3a27315f0e04cd7345c06abf27e8",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_login.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cda0cfe5b280ed0b4d3eee7a8a6bb855e89d3a27315f0e04cd7345c06abf27e8",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a5e47488388b4adf1131b3ac292e085fb368cc43e20c944ab715079335d3efd",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_event_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7df88481b602a3d06e49743c5f5a64eea6278c5d4f3190ab56e78bba856bdc5",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_network_space_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "239afac59dfb48676e8ab1902d40513674ab2ce1e549610d882688543739a5d5",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "694b3e8b89b598ae6fcc844de9c0fbb1137f22760025239aa81bdc17362a7898",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_certificate_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b65562431780c519693ad1d0d7ba1c4e4756fb79ea6d88abb9aadd23bdee9bb0",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_fibre_channel_switch_rename.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5235ce26fd770ce6b0475312f7c74d8b503e29670e9d7aaee4d63b923394da57",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_user_simple_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7302bc1f22ffef2d3d3d820c12e2d213f76f11f5c6999e9108eca4bf905f4ab",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_sso_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41cd28c549880266675b63f2a16ef2fccf7e2a44faf527df4454d383cefef1c4",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_sso_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba0820095da29cfc3edba4d9b8cd8fbc042e9383dad9a5a6469c31f3602762b9",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_host_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62163ae985c74c994d939df982410f62336a71a2e0746b590b6fcdfc1a981887",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_host_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e3305f7d665127fcde2e8ca18297f08f7f204dc404062f2194b169d0c50f880",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_host_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d031d6060abab6b88f28e6b5b589ae3937e36af924ef7d32406f36759a9436e",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_cluster_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1fb6b3fbfd910049aad9a1d85e2e1e253da22f86783d465b08ddd5b43325c89",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_cluster_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "332e3d2f2b3d76a419fddfbe38ab67b7d919d0f9cdaf728de22a3a200926b678",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_cluster_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf40ee2461bacade233f879634939d9c485121fcfdd128969c63ac6f44f77deb",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_map_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "755679bd63d37303562c3d1a6303665baa14995bb5952d2ef87c72710584bb92",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_map_present.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "747c16676033646f61d331b260a997c3d32052dffa5b276e4f868759f89e299b",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_map_absent.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d499f2fb8cb7ae401db0b18e0cbe7ddfc4c30317d045aaea9882e6e04b922b51",
+ "format": 1
+ },
+ {
+ "name": "tests/hacking/infini_fs_stat.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25f66e71989ffe55e860cab51760ab08edd0a1c9f0e59b1265795164a9eb06b7",
+ "format": 1
+ },
+ {
+ "name": "tests/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a009a349eaaf78c93ff56072d2ef171937bdb884e4976592ab5aaa9c68e1044",
+ "format": 1
+ },
+ {
+ "name": "scripts",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "plugins/module_utils/infinibox.py",
+ "name": "scripts/syslog.log",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b47754c0c5c2afccafcca852386186b223c36b6d801c62a2607a249918f49a6f",
+ "chksum_sha256": "fa297acd249a3868dfd028ecc722d9b10a775e122e773a5091741f4e81911f85",
"format": 1
},
{
- "name": "requirements.txt",
+ "name": "scripts/syslog_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "62f18d0b71e237e6ce05c76a970a709c43d962fa43f8a6801b7772c8ae1b4a05",
+ "chksum_sha256": "103529c45d6ca61c3a94089f1ead0a620da8a26c7fbd91c9b614d2a37566edb9",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e77edcc69dd86480e3392a71e6be429cb9576ed59e49bbb2e9dac4a44b28068f",
+ "format": 1
+ },
+ {
+ "name": "README-DEV.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0de3557148ac002aa96f835f5969211a73f223a0b79c2920ecb2a0fea1b73fb",
"format": 1
},
{
"name": "requirements-dev.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "84259416e2286c6f6747c6eda6c943370b904e6cc98b3a4c350a38ecddf3db13",
+ "chksum_sha256": "d2d183ca6ef47ed67ebb14ca1cd741933daca55d5838b93c08781312ed2ed4e4",
"format": 1
},
{
- "name": "test-args",
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62f18d0b71e237e6ce05c76a970a709c43d962fa43f8a6801b7772c8ae1b4a05",
+ "format": 1
+ },
+ {
+ "name": "bin",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3461153b976b6891497f8142af206c6196915b37179b9b57685f9a84bec31d0",
+ "format": 1
+ },
+ {
+ "name": ".gitlab-ci.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9df779aac7597942f23788f2b7636ec3f56612ffad1f548eb4b4bbed636d3764",
+ "format": 1
+ },
+ {
"name": ".gitignore",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "acc90aac40128d0adf5d3a48fb2b5d770a18d5b4ec5d7df499f0018e6453ed36",
+ "chksum_sha256": "dc9405faf5aa7aa9af717d8a1f4dad063a9103a7857b29d48775702e5ad79f02",
"format": 1
},
{
- "name": "LICENSE",
+ "name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e77edcc69dd86480e3392a71e6be429cb9576ed59e49bbb2e9dac4a44b28068f",
+ "chksum_sha256": "ddb4872a24789ff8bbfbf443dbd2c6a1802c6a8e11d49681634f1321d6ce0d6e",
"format": 1
}
],
diff --git a/ansible_collections/infinidat/infinibox/MANIFEST.json b/ansible_collections/infinidat/infinibox/MANIFEST.json
index 87d69d95d..bfe21c7b6 100644
--- a/ansible_collections/infinidat/infinibox/MANIFEST.json
+++ b/ansible_collections/infinidat/infinibox/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "infinidat",
"name": "infinibox",
- "version": "1.3.12",
+ "version": "1.4.3",
"authors": [
"Infinidat <partners.infi@infinidat.com>",
"David Ohlemacher",
@@ -13,8 +13,8 @@
"array",
"cloud",
"fiber",
- "fiber_channel",
- "fiberchannel",
+ "fibre_channel",
+ "fibrechannel",
"hostpowertools",
"ibox",
"infinibox",
@@ -29,7 +29,7 @@
"storage_array",
"storagearray"
],
- "description": "A set of Ansible modules that allow simple, but powerful, idempotent interactions with Infinidat InfiniBoxes.",
+ "description": "A set of Ansible modules that allow simple, but powerful, idempotent interactions with Infinidat Infiniboxes.",
"license": [
"GPL-3.0-or-later"
],
@@ -44,7 +44,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "71d6d4e9dd13ffdc2e4aa2b21b5b1412b43c85b9be7c5c19d0231734caa025cd",
+ "chksum_sha256": "e45ed8525f39595d529c89a74a32429e893f5b90ee87289e837086a630df0bbd",
"format": 1
},
"format": 1
diff --git a/ansible_collections/infinidat/infinibox/Makefile b/ansible_collections/infinidat/infinibox/Makefile
deleted file mode 100644
index 0be78fdd6..000000000
--- a/ansible_collections/infinidat/infinibox/Makefile
+++ /dev/null
@@ -1,333 +0,0 @@
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# vim: set foldmethod=indent foldnestmax=1 foldcolumn=1:
-
-# A Makefile for creating, running and testing Infindat's Ansible collection.
-
-### Dependencies ###
-# - jq: https://stedolan.github.io/jq/
-# - spruce: https://github.com/geofffranks/spruce
-
-### environment ###
-# Include an env file with secrets. This exposes the secrets
-# as envvars only for the life of make. It does not
-# pollute the environment persistently.
-# Format:
-# API_KEY=someAnsibleGalaxyApiKey
-# The key only needs to be valid to use target galaxy-colletion-publish.
-
-_env = ~/.ssh/ansible-galaxy.sh
-include $(_env)
-export $(shell sed 's/=.*//' $(_env))
-
-# Use color in Makefiles.
-_use_color = true
-
-include Makefile-help
-
-### Vars ###
-_version = $(shell spruce json galaxy.yml | jq '.version' | sed 's?"??g')
-_namespace = $(shell spruce json galaxy.yml | jq '.namespace' | sed 's?"??g')
-_name = $(shell spruce json galaxy.yml | jq '.name' | sed 's?"??g')
-_install_path = ~/.ansible/collections
-_install_path_local = $$HOME/.ansible/collections
-_python_version = python3.8
-_venv = venv
-_requirements-file = requirements.txt
-_requirements-dev-file = requirements-dev.txt
-_user = psus-gitlab-cicd
-_password_file = vault_password
-_password = $$(cat vault_password.txt)
-_ibox_url = ibox1521
-_infinishell_creds = --user $(_user) --password $(_password) $(_ibox_url)
-SHELL = /bin/bash
-_ansible_clone = ~/cloud/ansible
-_network_space_ips = 172.31.32.145 172.31.32.146 172.31.32.147 172.31.32.148 172.31.32.149 172.31.32.150
-_modules = "infini_cluster.py" "infini_export.py" "infini_host.py" "infini_network_space.py" "infini_port.py" "infini_vol.py" "infini_export_client.py" "infini_fs.py" "infini_map.py" "infini_pool.py" "infini_user.py"
-
-##@ General
-create-venv: ## Setup venv.
- $(_python_version) -m venv $(_venv) && \
- source $(_venv)/bin/activate && \
- python -m pip install --upgrade pip && \
- python -m pip install --upgrade --requirement $(_requirements-file)
- python -m pip install --upgrade --requirement $(_requirements-dev-file)
-
-_check-vars:
-ifeq ($(strip $(API_KEY)),)
- @echo "API_KEY variable is unset" && false
-endif
-
-env-show: _check-vars
- @echo "API_KEY=[ set but redacted ]"
-
-version: _check-vars ## Show versions.
- @echo -e $(_begin)
- ansible --version
- @echo
- ansible-galaxy collection list
- @echo -e $(_finish)
-
-_test-venv:
- @# Test that a venv is activated
-ifndef VIRTUAL_ENV
- @echo "Error: Virtual environment not set"
- @echo -e "\nRun:\n make pyvenv"
- @echo -e " source $(_venv)/bin/activate\n"
- exit 1
-endif
- @echo "Virtual environment set"
-
-pylint:
- @echo -e $(_begin)
- cd plugins/modules && \
- pylint infini_network_space.py
- cd -
- @echo -e $(_finish)
-
-pyfind: ## Search project python files using: f='search term' make pyfind
- find . -name "*.py" | xargs grep -n "$$f" | egrep -v 'venv|eggs|parts|\.git|external-projects|build'
-
-##@ Galaxy
-
-galaxy-collection-build: ## Build the collection.
- @echo -e $(_begin)
- rm -rf collections/
- ansible-galaxy collection build
- @echo -e $(_finish)
-
-galaxy-collection-build-force: ## Force build the collection. Overwrite an existing collection file.
- @echo -e $(_begin)
- ansible-galaxy collection build --force
- @echo -e $(_finish)
-
-galaxy-collection-publish: _check-vars ## Publish the collection to https://galaxy.ansible.com/ using the API key provided.
- @echo -e $(_begin)
- ansible-galaxy collection publish --api-key $(API_KEY) ./$(_namespace)-$(_name)-$(_version).tar.gz -vvv
- @echo -e $(_finish)
-
-galaxy-collection-install: ## Download and install from galaxy.ansible.com. This will wipe $(_install_path).
- @echo -e $(_begin)
- ansible-galaxy collection install $(_namespace).$(_name) --collections-path $(_install_path) --force
- @echo -e $(_finish)
-
-galaxy-collection-install-locally: ## Download and install from local tar file.
- @echo -e $(_begin)
- ansible-galaxy collection install --force $(_namespace)-$(_name)-$(_version).tar.gz --collections-path $(_install_path_local)
- @echo -e $(_finish)
-
-##@ Playbooks Testing
-_test_playbook:
- @# Run a playbook specified by an envvar.
- @# See DEV_README.md
- @# vault_pass env var must be exported.
- cd playbooks && \
- export ANSIBLE_LIBRARY=/home/dohlemacher/cloud/ansible-infinidat-collection/playbooks/plugins/modules; \
- export ANSIBLE_MODULE_UTILS=/home/dohlemacher/cloud/ansible-infinidat-collection/plugins/module_utils; \
- if [ ! -e "../vault_password.txt" ]; then \
- echo "Please add your vault password to vault_password.txt"; \
- exit 1; \
- fi; \
- ansible-playbook \
- $$ask_become_pass \
- --inventory "inventory" \
- --extra-vars "@../ibox_vars/iboxCICD.yaml" \
- --vault-password-file ../vault_password.txt \
- "$$playbook_name"; \
- cd -
-
-test-create-resources: ## Run full creation test suite as run by Gitlab CICD.
- @echo -e $(_begin)
- ask_become_pass="-K" playbook_name=test_create_resources.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-remove-resources: ## Run full removal test suite as run by Gitlab CICD.
- @echo -e $(_begin)
- ask_become_pass="-K" playbook_name=test_remove_resources.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-create-snapshots: ## Test creating immutable snapshots.
- @echo -e $(_begin)
- playbook_name=test_create_snapshots.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-remove-snapshots: ## Test removing immutable snapshots (teardown).
- @echo -e $(_begin)
- playbook_name=test_remove_snapshots.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-create-net-spaces: dev-install-modules-to-local-collection ## Test creating network spaces.
- @echo -e $(_begin)
- playbook_name=test_create_network_spaces.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-remove-net-spaces: ## Test removing net spaces (teardown).
- @echo -e $(_begin)
- playbook_name=test_remove_network_spaces.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-create-map-cluster: ## Run full creation test suite as run by Gitlab CICD.
- @echo -e $(_begin)
- playbook_name=test_create_map_cluster.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-test-remove-map-cluster: ## Run full removal test suite as run by Gitlab CICD.
- @echo -e $(_begin)
- playbook_name=test_remove_map_cluster.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-##@ Infinisafe Demo
-
-infinisafe-demo-setup: ## Setup infinisafe demo.
- @echo -e $(_begin)
- playbook_name=infinisafe_demo_setup.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-infinisafe-demo-runtest: ## Run tests on infinisafe demo snapshot on forensics host.
- @echo -e $(_begin)
- ask_become_pass="-K" playbook_name=infinisafe_demo_runtest.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-infinisafe-demo-teardown: ## Teardown infinisafe demo.
- @echo -e $(_begin)
- ask_become_pass="-K" playbook_name=infinisafe_demo_teardown.yml $(_make) _test_playbook
- @echo -e $(_finish)
-
-##@ Hacking
-#_module_under_test = infini_network_space
-_module_under_test = infini_fs
-
-dev-hack-create-links: ## Create soft links inside an Ansible clone to allow module hacking.
- @#echo "Creating hacking module links"
- @for m in $(_modules); do \
- ln --force --symbolic $$(pwd)/plugins/modules/$$m $(_ansible_clone)/lib/ansible/modules/infi/$$m; \
- done
- @#echo "Creating hacking module_utils links $(_module_utilities)"
- @for m in "infinibox.py" "iboxbase.py"; do \
- ln --force --symbolic $$(pwd)/plugins/module_utils//$$m $(_ansible_clone)/lib/ansible/module_utils/$$m; \
- done
-
-_dev-hack-module: dev-hack-create-links # Run module. PDB is available using breakpoint().
- cwd=$$(pwd) && \
- cd $(_ansible_clone) && \
- JSON_IN="$$cwd/tests/hacking/$(_module_under_test)_$${state}.json" && \
- if [[ ! -a "$$JSON_IN" ]]; then \
- >&2 echo "Error: $$JSON_IN not found"; \
- exit; \
- fi; \
- source venv/bin/activate 1> /dev/null 2> /dev/null && \
- source hacking/env-setup 1> /dev/null 2> /dev/null && \
- AIC=/home/dohlemacher/cloud/ansible-infinidat-collection \
- ANS=/home/dohlemacher/cloud/ansible \
- PYTHONPATH="$$PYTHONPATH:$$AIC/plugins/modules" \
- PYTHONPATH="$$PYTHONPATH:$$AIC/plugins/module_utils" \
- PYTHONPATH="$$PYTHONPATH:$$ANS/lib" \
- PYTHONPATH="$$PYTHONPATH:$$ANS/hacking/build_library/build_ansible" \
- PYTHONPATH="$$PYTHONPATH:$$ANS/venv/lib/python3.8/site-packages" \
- python -m "$(_module_under_test)" "$$JSON_IN" 2>&1 | \
- grep -v 'Unverified HTTPS request'
-
-_dev-hack-module-jq: # If module is running to the point of returning json, use this to run it and prettyprint using jq.
- @$(_make) _dev-hack-module | egrep 'changed|failed' | jq '.'
-
-dev-hack-module-stat: ## Hack stat.
- @state=stat $(_make) _dev-hack-module
-
-dev-hack-module-stat-jq: ## Hack stat with jq.
- @state=stat $(_make) _dev-hack-module-jq
-
-dev-hack-module-present: ## Hack present.
- @state=present $(_make) _dev-hack-module
-
-dev-hack-module-present-jq: ## Hack present with jq.
- @state=present $(_make) _dev-hack-module-jq
-
-dev-hack-module-absent: ## Hack absent.
- @state=absent $(_make) _dev-hack-module
-
-dev-hack-module-absent-jq: ## Hack absent with jq.
- @state=absent $(_make) _dev-hack-module-jq
-
-##@ Test Module
-_module = infini_network_space.py
-
-find-default-module-path: ## Find module path.
- ansible-config list | spruce json | jq '.DEFAULT_MODULE_PATH.default' | sed 's?"??g'
-
-_collection_local_path = ~/.ansible/collections/ansible_collections/infinidat/infinibox/plugins
-dev-install-modules-to-local-collection: ## Copy modules to local collection
- @echo -e $(_begin)
- @echo "local collection path: $(_collection_local_path)"
- @echo "Installing modules locally"
- @cp plugins/modules/*.py $(_collection_local_path)/modules
- @echo "Installing utilities locally"
- @cp plugins/module_utils/*.py $(_collection_local_path)/module_utils
- @echo "Installing filters locally"
- @cp plugins/filter/*.py $(_collection_local_path)/filter
- @echo -e $(_finish)
-
-##@ ansible-test
-test-sanity: ## Run ansible sanity tests
- @# in accordance with
- @# https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections
- @# This runs on an collection installed from galaxy. This makes it
- @# somewhat useless for dev and debugging. Use target test-sanity-locally.
- cd $(_install_path)/ansible_collections/infinidat/infinibox && \
- ansible-test sanity --docker default -v
-
-_setup-sanity-locally: galaxy-collection-build-force galaxy-collection-install-locally
- @# Setup a test env.
- cd $(_install_path_local)/ansible_collections/infinidat/infinibox && \
- $(_python_version) -m venv $(_venv) && \
- source $(_venv)/bin/activate && \
- python -m pip install --upgrade pip && \
- python -m pip install --upgrade --requirement $(_requirements-file)
-
-test-sanity-locally: _setup-sanity-locally ## Run ansible sanity tests locally.
- @# in accordance with
- @# https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections
- @# This runs on an collection installed locally making it useful for dev and debugging.
- cd $(_install_path_local)/ansible_collections/infinidat/infinibox && \
- ansible-test sanity --docker default --requirements $(_requirements-file)
-
-test-sanity-locally-all: galaxy-collection-build-force galaxy-collection-install-locally test-sanity-locally ## Run all sanity tests locally.
- @# Run local build, install and sanity test.
- @# Note that this will wipe $(_install_path_local).
- @echo "test-sanity-locally-all completed"
-
-##@ IBox
-infinishell: ## Run infinishell.
- @TERM=xterm infinishell $(_infinishell_creds) --json
-
-infinishell-events: # Run infinishell with hint to watch events.
- @TERM=xterm echo "Command: event.watch username=$(_user) exclude=USER_LOGGED_OUT,USER_LOGIN_SUCCESS,USER_SESSION_EXPIRED,USER_LOGIN_FAILURE tail_length=35"
- @TERM=xterm infinishell $(_infinishell_creds)
-
-infinishell-network-space-iscsi-create: ## Create a network space using infinishell.
- @echo -e $(_begin)
- @TERM=xterm infinishell --cmd="config.net_space.create name=iSCSI service=iSCSI interface=PG1 network=172.31.32.0/19 -y" $(_infinishell_creds) 2>&1 \
- | egrep 'created|already exists' && \
- for ip in $(_network_space_ips); do \
- echo "Creating IP $$ip" && \
- TERM=xterm infinishell --cmd="config.net_space.ip.create net_space=iSCSI ip_address=$$ip -y" $(_infinishell_creds) 2>&1 \
- | egrep 'created|NET_SPACE_ADDRESS_CONFLICT' && \
- echo "Enabling IP $$ip"; \
- done
- @echo -e $(_finish)
-
-infinishell-network-space-iscsi-delete: ## Delete a network space using infinishell.
- @echo -e $(_begin)
- @for ip in $(_network_space_ips); do \
- echo "Disabling IP $$ip" && \
- TERM=xterm infinishell --cmd="config.net_space.ip.disable net_space=iSCSI ip_address=$$ip -y" $(_infinishell_creds) 2>&1 \
- | egrep 'disabled|IP_ADDRESS_ALREADY_DISABLED|no such IP address|No such network space' && \
- echo "Deleting IP $$ip" && \
- TERM=xterm infinishell --cmd="config.net_space.ip.delete net_space=iSCSI ip_address=$$ip -y" $(_infinishell_creds) 2>&1 \
- | egrep '$$ip deleted|no such IP address|No such network space'; \
- done
- @echo
- @echo "Deleting network space iSCSI" && \
- TERM=xterm infinishell --cmd="config.net_space.delete net_space=iSCSI -y" $(_infinishell_creds) 2>&1 \
- | egrep 'deleted|No such network space';
- @echo -e $(_finish)
diff --git a/ansible_collections/infinidat/infinibox/Makefile-help b/ansible_collections/infinidat/infinibox/Makefile-help
deleted file mode 100644
index ff6914eab..000000000
--- a/ansible_collections/infinidat/infinibox/Makefile-help
+++ /dev/null
@@ -1,82 +0,0 @@
-# ##@ Help Group Name
-# .PHONY: recipe_name
-# To pick up the recipe name, use:
-# recipe_name: ## Recipe help.
-# @echo -e $(_begin)
-# ...works...
-# @echo -e $(_finish)
-
-# Use color in Makefiles.
-_USE_COLOR ?= true
-
-.DEFAULT_GOAL := help
-
-# Color vars
-_help_padding = 30
-_black = \033[0;30m
-_red = \033[0;31m
-_green = \033[0;32m
-_orange = \033[0;33m
-_blue = \033[0;34m
-_purple = \033[0;35m
-_cyan = \033[0;36m
-_lt_gray = \033[0;37m
-_gray = \033[1;30m
-_lt_rd = \033[1;31m
-_lt_green = \033[1;32m
-_yellow = \033[1;33m
-_lt_blue = \033[1;34m
-_lt_purple= \033[1;35m
-_lt_cyan = \033[1;36m
-_white = \033[1;37m
-_reset = \033[0m
-
-# Customize colors for 'make help'
-_group = $(_lt_blue)
-_recipe = $(_orange)
-# Customize colors for make leaders/followers
-_heading = $(_yellow)
-
-# Use _make to execute make with consistent options
-_make = make --no-print-directory
-_watch = watch -n 1 --no-title
-
-# Other vars
-_LEADER = "================ ["
-_FOLLOWER = "\] ================"
-
-ifneq ($(_USE_COLOR),true)
- # No color
- _group =
- _recipe =
- _heading =
- _reset =
-endif
-
-_begin = "$(_heading)$(_LEADER) Begin $@ $(_FOLLOWER)$(_reset)"
-_finish = "$(_heading)$(_LEADER) Completed $@ $(_FOLLOWER)$(_reset)"
-
-##@ General
-.PHONY: help
-help: ## Display this help.
- @echo "$(_purpose)"
- @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make $(_recipe)<recipe>$(_reset)\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "$(_recipe)%-$(_help_padding)s$(_reset) %s\n", $$1, $$2 } /^##@/ { printf "\n$(_group)%s$(_reset)\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
-
-.PHONY: color-help
-color-help: ## Edit Makefile-help to change color schemes.
- @echo -e $(_begin)
- @echo "To enable colors in make output, 'export _USE_COLOR=true' with the leading underscore"
- @echo "Edit Makefile-help to change color schemes"
-ifeq ($(_USE_COLOR),true)
- @printf "\n\tColor is currently enabled\n\n"
-else
- @printf "\n\tColor is currently disabled\n\n"
-endif
- @echo -e $(_finish)
-
-_find_ignore=.git|test|log
-.PHONY: ansfind
-ansfind: ## Search project files using: f='search term' make ansfind
- @echo -e $(_begin)
- find . -name "*" -type=f | xargs grep -n "$$f" | egrep -v '$(_find_ignore)'
- @echo -e $(_finish)
diff --git a/ansible_collections/infinidat/infinibox/README-DEV.md b/ansible_collections/infinidat/infinibox/README-DEV.md
new file mode 100644
index 000000000..23bb71db0
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/README-DEV.md
@@ -0,0 +1,21 @@
+# Infinidat's Ansible Collection Development
+
+Installing Ansible using pip from within a venv caused an error much later in the dev process. When installing a collection built in this environment this error occurred:
+```
+$ make galaxy-collection-install-locally
+================ [ Begin galaxy-collection-install-locally ] ================
+ansible-galaxy collection install --force infinidat-infinibox-1.4.0.tar.gz --collections-path $HOME/.ansible/collections
+Starting galaxy collection install process
+Process install dependency map
+Starting collection install process
+Installing 'infinidat.infinibox:1.4.0' to '/home/stack/.ansible/collections/ansible_collections/infinidat/infinibox'
+ERROR! Unexpected Exception, this is probably a bug: "linkname 'venv/lib/python3.8/site-packages/ansible_test/_data/injector/python.py' not found"
+```
+
+Therefore, using a venv is not recommended. Instead, use the following, which will install the Ansible commands into `~/.local/bin`.
+```
+$ python3 -m pip install --user ansible
+$ export PATH=/home/stack/.local/bin:$PATH
+```
+
+
diff --git a/ansible_collections/infinidat/infinibox/README.md b/ansible_collections/infinidat/infinibox/README.md
index b4bb06c90..e248972ca 100644
--- a/ansible_collections/infinidat/infinibox/README.md
+++ b/ansible_collections/infinidat/infinibox/README.md
@@ -1,22 +1,39 @@
# Infinidat's Ansible Collection
+## Links
+- https://galaxy.ansible.com/infinidat/infinibox
+- https://docs.ansible.com/ansible/latest/collections/infinidat/infinibox
+- https://github.com/Infinidat/ansible-infinidat-collection
+- https://git.infinidat.com/PSUS/ansible-infinidat-collection
+
## Platforms Supported
All Infindat InfiniBoxes are supported.
## Prerequisites
-- Ansible 2.12 or newer
-- InfiniSDK 151.1.1 or newer.
+- Ansible 2.14 or newer
+- InfiniSDK 225.1.1 or newer
+- Python 3.6 or newer. This is a prerequisite of Infinidat's infinisdk Python module.
## Modules
+- infini_certificate: Configure a SSL certificate.
- infini_cluster: Creates, deletes or modifies host clusters.
-- infini_export_client: Creates, deletes or modifys NFS client(s) for existing exports.
+- infini_config: Modify an Infinibox configuration.
- infini_export: Creates, deletes or modifies NFS exports.
+- infini_export_client: Creates, deletes or modifies NFS client(s) for existing exports.
+- infini_fibre_channel_switch: Rename a fibre channel switch.
- infini_fs: Creates, deletes or modifies filesystems.
- infini_host: Creates, deletes or modifies hosts.
- infini_map: Creates or deletes mappings of volumes to hosts.
+- infini_metadata: Creates or deletes metadata for various Infinidat objects.
+- infini_network_space: Creates or deletes network spaces.
+- infini_notification_rule: Configure notification rules.
+- infini_notification_target: Configure notification targets.
- infini_pool: Creates, deletes or modifies pools.
-- infini_port: Adds or deletes fiber channel or iSCSI ports to hosts.
+- infini_port: Adds or deletes fibre channel or iSCSI ports to hosts.
+- infini_sso: Configure a single-sign-on (SSO) certificate.
- infini_user: Creates, deletes or modifies an InfiniBox user.
+- infini_users_repositories: Creates, deletes, or modifies LDAP and AD Infinibox configurations.
+- infini_users_repository: Configure Active directory (AD) and Lightweight Directory Access Protocol (LDAP).
- infini_vol: Creates, deletes or modifies a volume.
Most modules also implement a "stat" state. This is used to gather information, aka status, for the resource without making any changes to it.
diff --git a/ansible_collections/infinidat/infinibox/ibox_vars/iboxCICD.yaml b/ansible_collections/infinidat/infinibox/ibox_vars/iboxCICD.yaml
index 476b19b8b..91aeb192e 100644
--- a/ansible_collections/infinidat/infinibox/ibox_vars/iboxCICD.yaml
+++ b/ansible_collections/infinidat/infinibox/ibox_vars/iboxCICD.yaml
@@ -1,10 +1,11 @@
$ANSIBLE_VAULT;1.1;AES256
-66343034363232313933643233373938303539343932613065656639303736666261396638333565
-6430326563653366353435663339616638396164316633370a303964386364356365663064613766
-36616565626561396434303535663133656562646632383139393866393334383331623133393030
-3838663637366337310a333462633161316239663964653835336534636662393730313731666433
-66646237393738323330383465396437666365626636316162373964653737383034353530306633
-33643337373164376664643465646437316530623363643634323835303030333935346637613236
-61666331366661336234666436656665663464353664363761393866653263356434313232363564
-65346630386262633962386537376234646666326161343738303962306537323162306362636634
-66643231636466646539386137363037346434363962653834353139386434643464
+62653266666434313135323036623039316436346264346165616364343662643163343835363366
+6234343463353063643666386537613034363837323730370a396338326665333066323635383833
+34363466633839306135386336613931353335306330346330303930303132373231633362353363
+3539373931306362610a613330346535373438643763663365306631336638316234613863323234
+35343636623034646632396235393630356632386236323935613039313266653965646563613938
+37303336343431386566656331613461376533353164376430343631353365306431363034383763
+37383633343562386561306638386635313432623766306336386366666336323663313966353963
+66643931653363316130316331633137333237376331353439356131623735346561636262646332
+33316436643365336639666461633762353833386330643331633931323664643364393835396330
+6364626632306532373263333430336135336238373731646666
diff --git a/ansible_collections/infinidat/infinibox/ibox_vars/iboxNNNN_example.yaml b/ansible_collections/infinidat/infinibox/ibox_vars/iboxNNNN_example.yaml
index f802b163b..d4ef26691 100644
--- a/ansible_collections/infinidat/infinibox/ibox_vars/iboxNNNN_example.yaml
+++ b/ansible_collections/infinidat/infinibox/ibox_vars/iboxNNNN_example.yaml
@@ -10,3 +10,5 @@ auto_prefix: "PSUS_ANSIBLE_"
user: "user"
password: "password"
system: "iboxNNNN"
+
+sso_signing_certificate: "signing cert in one single line with the begin and end lines removed" # Used by configure_array playbook
diff --git a/ansible_collections/infinidat/infinibox/ibox_vars/vibox.yaml b/ansible_collections/infinidat/infinibox/ibox_vars/vibox.yaml
new file mode 100644
index 000000000..76dc8b5b7
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/ibox_vars/vibox.yaml
@@ -0,0 +1,134 @@
+$ANSIBLE_VAULT;1.1;AES256
+65633535303262623839393038393634326234376132336138653530616562386437363737666238
+3561386434663132373330653534623364613330323663380a313164353936653430613832623966
+66303131373861386338336638636235326334616435626665363465323634653537343331613465
+3235656431383237650a643630613131306264313062636165393164366361616431303339623236
+64653063626333306466663637343332306634626131666634663035393066383636633563626537
+66663737616435323833653466623231646462336163633363333165646338343166616232356165
+32306166376130396264363561396639353165653664316139653366666638343761336239333235
+61303430343061396262373032666133393334326465373964613638336132623466313666623735
+65396337623963346633356462353732356237616566346132343738323532663139643661623364
+64663638666633343665623438336164643565383362323631356232346338653537663430306566
+34366635623735343439383963313261666131636239373238646261313565373731303365316534
+64316263666366326263636631376232343632346634363532643838653033626636316332346433
+37613632373439633731643531333834373433653663326534316337646361393537616262643137
+61323362316333633630663264373836306339643632393462323536386366346439383562613164
+37353534316137663938653630663639366531386633643335623065366131633730383938393565
+62343638326439663130646236373938623436383161386562346431316330613231663361353938
+38663765333239393866363338656264326164353262363839313433363339666139616537646231
+65376361646561646436306433316266356233306434613232333661386564373532333565373038
+65653463336436316338373338313365346634393437333163353936396666626139333934386137
+36306234663163376131663738643764393537303932383037623534376432376431663738333539
+64396136663964306639393162363931323661376134613739336636613734636338353334383763
+34313266353635623335363266636336663932363261333839383664303163353332656365386439
+62303233643665303730653937363338313139646535313765323961356438633739623762656166
+61376137666236333063653630633537653832333165353565393636326666326534333736393835
+65303436303161316139636366303364316431306162666432376638616464326431326365326461
+36363234396130393661313230616130643131373162396139373766303465656164386433366131
+63306536333930653535373636633137623837396163373061363635653037303435643536636563
+36353531333733333237623065326636383161613633373061323530666662646232613861343439
+66343035326434653837643665323937383636353234653864386232336530303666303363633736
+35316262663438646638383266616434303732636362616332316331386262396632623963343266
+36306563326135396138383231353861646462663265646333393665646636326362336430383736
+66343965373066346433666333366362653363626432316439623535306534653331653330353033
+66643362666433616139316430383062386364646433623432353162656337326661343336376162
+64383933393535646635363232306331613562313164393263343433646464613266343361613866
+65386164393031643537373366633566643961653366336534633831646132623236633934373731
+32656630643638393565336634663234633431303534313865333636613464373063363762373137
+31396564323562663033343664393837313665356165353531633433393266356130363264396162
+62393132383530363236336137313939333561326263386165373631633264303262616236306135
+35343636386666633337353863666566336630373736323563613466356261656630623465633465
+64333632303934383532663962653236646162303864303836666566343331626332663565353433
+34323165396535323566393036653433316337653130633364303166313035363432643965316535
+34313164653962396231646362313537623932633933396637653535393264353165646333373766
+33313466663636666330626236393632366230653533356461303834316434306536376539626463
+32343438646630396434353265643530616664353539666562626563653838323661393831343633
+64393432653233613734623466666135333135626661376362623732653134636332303837666432
+63393733313737656130366335336533323266373135306161653361633065643364623265653735
+32326339666538356361656331313761326532653037353731313331306463663866663961336239
+65323538346161386264633664663435616437326633386630383863633466363163396136353430
+31633338613664346434353339353139663863363066383937663433326536326435313332383636
+39353730376131656431393336323632373937653532663036646565303337613761353537393933
+33393735366239666638383336613632663434653630653936633937353137316466393937616366
+65333531376330346438323766303331653137663535323932336234373533306337313837303763
+38316265386132393435663364393838326631633638336430353533356238643734626239653837
+35626162383935383065343236653465623838646238353635313939363661333033623663613539
+33383665356662353834376464333532353563303465323762653538393637343236626639656133
+38346464393831373538336164333063636439623234316632353738653730613939316561303466
+66663763306432643435376636313666653139346632376663323432616565656437396362613461
+31383263303838363735333135623665343535663732626561616562383564653037326437643831
+32633335376636363034326236663438393761336630666336353365613237653861373737623436
+39656530346336626365316338633635363865383935623934363738653637396132626232616532
+31633064613532346363623537353936663664623037643737376139303961636237363562646338
+30633432626334643161313436623733656166636634323861306364616434333963323966303563
+61363632626535363963313362336632326565383039373834366236313331666437613735353135
+39323138353333666137383965646134656337393432383936626533616564343839373035616536
+30383363653730343637336562633035653335383761313430653765383637363364653331646464
+33396165326163636434356630663432386261353330663962633534666136623363623735346564
+36336635313130336539366262653864316234306337656439633061356533323839383065336639
+65616363383235343931393562313038333430343965383730646165376665663233663635343665
+66303464393963663736333062663235343164393364623539666536663738376563363166363434
+37396530326330333431626231306162663333366665646639623333313565653964666161643431
+33643166323965366532663433613436353938363133643764346337343738363031316461393636
+36333036336438356466303633646165376264303237383461323738663264613630646338353637
+38356439363030643137636235303932353535636666636139326431396330316539356464306666
+39313039663733376535616239646664393464633065333731613637356664333331353430643461
+39343431663936306463396139646133363361616237353765656533663265303963633432333330
+63646632336334643439613033346263306664383561313839356137373134313433623031366134
+64616432636337633432343236663163343732346239313833633066336334356530636438376664
+64383565636233333565373933616262663631656137663231393663666531326538616437663438
+65356465323966616365663136613232316338646462643965393364666263646537376439633637
+36656136303063303565346239623161383232383734376530383031653830643964333937373161
+66306137363666353262306162643432303736396237666136383036386530336163306131613434
+65303964313436383265636465316561306238326435663765616466376637383738613930376231
+39326436306130323665373135636164306135313238383435366563306431663664633261336438
+39613136343466383331386334636566316266396637333233363965633239363965306131333532
+31373933323736336461316163323639316163656336633938326337316364646461343565376166
+31646334373966323931393532333335316430303530373263393633326130333265393630623532
+37323836363761383538326161306539653930373038366364366239353239343666623436616265
+35303662373761353530333435383362636435623932396632383232353833663135303066353736
+62636336343163356638326236633562373834316538376435623839376333326635666331383664
+34636161623065616238613264363336346639623063643630653366373035303639656631353363
+36306162343238656536343238303431306161386465306435313137373464636162643532316462
+35353665383935613433663034323537336566656439323165633638323334313666633138363037
+65663761616263346631333937633635663735366438646631363630316565373964316564306563
+30333737326663343632666133616462373261376262643632636431366365653266306336396630
+32356239356561663863656461316166616133386165613139653463663435303266323761653834
+37373663313839366664643461653361306465653931636535363662656630396239623538633935
+36363837626534613234386431636662303866363764333630656431363063656530393738356561
+63316338366433353564646530326539643330653630636435386339613232326530363662666532
+61376165623065323964393561386335386534646232323936303962363964336233356234383030
+35386137633937336232353830643430363062643266363036333836376335626535373139356330
+66336536633365346162393938393165613264393065303234633930663534633732666339356161
+30623661643430623732636262346666303262653934663038343365323035626533613636373035
+65323539373432386664396161353061653334343638623439393531656538643661326536306561
+66303362333565343238646230306165353434376563636664646462623631626233663762636262
+62313730663634376233316237346665363561356366346534356235383935633665643461633662
+64373336663665616632616331663665373564353565353665666539326232613930623061313738
+38663038363839353464633139626265666535323764366138626362383837343133386435303565
+32643334326437663030373163303565623235383762313161313266336637306335336238303538
+62653833643235303239633730376432633366343531636564386362633534633730353563336166
+63663334643264316530323737303039626434383462343238653965333531313639313763343564
+66643035313063373732616332633635363537626534353731396661313936316431356130653231
+66373630323334376336663066623862323762666336366537666435636232323566393862346133
+65396334333031366137353833376337383466326565613763616366613161316561643433363838
+35333861363735343433343032653330653438343030373734396336643032366339633431383465
+64366431633564633234363261303135343233613436323965616437303933326138373531323239
+38396464306138656533333335303161633562353735313462383731633532626264386636333863
+62366666616530623837663739306432383361396136313230613361633932343465626338323137
+61366536373539376436623537386430636533656630666338323061316161333933643839366430
+35613838613866323239326264656361303762383265666562356231316637646638636364326636
+31373236376331653930666237346363313461636366386665323931663161336633623039373237
+33623364373162316366653462353532356633616231663735396535343336393061386432396134
+39303432396262373931303064653131343535393235323062316232343336346631613563383832
+34353162366239356336663631363030303565623261616235663061666431306234623838633134
+30646164356237393933343837316339376664316236336130393735313536376164333963653166
+35366461613930623539323335306364346439373536316434363930343263663661343166326139
+63383530353139643463356166373231323034343338383333643639396264366633613263616437
+65636466346134316530613765386466613363373865353531326164323832333736336366623166
+64373533343965626635646238386630643431386462633361346165383431646466656539323439
+36386262653735393036663330373364323563393561313739326438363965303637346137636166
+64653036313032313564343335633562353338323464383730323232666230373032643965333362
+39383736313430396338393961616438653834333237373162656532303936363039323062616330
+61396131303963353166343163353436643563336433363235616435386532373830666239323661
+36636265383862613466346335363830343931623766396132393464326464633161
diff --git a/ansible_collections/infinidat/infinibox/meta/runtime.yml b/ansible_collections/infinidat/infinibox/meta/runtime.yml
index 112c4e065..2333e176f 100644
--- a/ansible_collections/infinidat/infinibox/meta/runtime.yml
+++ b/ansible_collections/infinidat/infinibox/meta/runtime.yml
@@ -1 +1 @@
-requires_ansible: ">=2.9.10"
+requires_ansible: ">=2.14.0"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/configure_array.yml b/ansible_collections/infinidat/infinibox/playbooks/configure_array.yml
new file mode 100644
index 000000000..bee0899d1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/configure_array.yml
@@ -0,0 +1,388 @@
+---
+# PSDEV-1108: Create playbook automating cluster configuration mobility.
+# Create three volumes: vol_nonwriteable, vol_explicitly_writable and vol_writable.
+- name: Configure an Infinibox Array
+ hosts: localhost
+ gather_facts: false
+ # vars:
+ # - dataset_default_provisioning: THIN
+ # - use_base2_units: true
+ # - table_export_limit: 3000
+ # - admin_user_name: admin
+ # - admin_user_password: 123456
+ # - admin_user_email: dev.mgmt@infinidat.com
+ # - pool_admin_user_name: Commvault
+ # - pool_admin_user_password: 123456
+ # - pool_admin_user_email: dohlemacher@infinidat.com
+ # - ldap_name: PSUS_ANSIBLE_ad
+ # - setup_alerting_emails: ["dohlemacher@infinidat.com"]
+ # - prod_alerting_emails: ["dohlemacher@infinidat.com"]
+ # - alerting_event_levels: ["INFO", "WARNING", "ERROR", "CRITICAL"]
+ # - alerting_includes: []
+ # - alerting_excludes: ["EVENT_FLOOD", "USER_LOGIN_SUCCESS", "USER_LOGGED_OUT"]
+ # - syslogs: # A list of syslog dictionaries
+ # - target_name: syslog1_target
+ # rule_name: syslog1
+ # protocol: SYSLOG
+ # host: 172.31.88.158
+ # port: 514
+ # facility: LOCAL7
+ # transport: UDP
+ # post_test: true # Not a real test if using UDP
+ # - target_name: syslog2_target
+ # rule_name: syslog2
+ # protocol: SYSLOG
+ # host: 172.31.88.158
+ # port: 515
+ # facility: LOCAL7
+ # transport: UDP
+ # post_test: true
+ # - target_name: graylog_target
+ # rule_name: graylog
+ # protocol: SYSLOG
+ # host: 172.31.77.214
+ # port: 1514
+ # facility: LOCAL7
+ # transport: UDP
+ # post_test: true
+ tasks:
+ - name: Configuration
+ ansible.builtin.debug:
+ msg:
+ - "user: {{ user }}"
+ # - "password: {{ password }}"
+ - "system: {{ system }}"
+
+ - name: Pause
+ ansible.builtin.pause:
+ seconds: 2
+
+ - name: Create temporary setup email notification rule setup_email for addresses {{ setup_alerting_emails }}
+ infinidat.infinibox.infini_notification_rule:
+ name: "setup_email"
+ event_level: "{{ alerting_event_levels }}"
+ include_events: "{{ alerting_includes }}"
+ exclude_events: "{{ alerting_excludes }}"
+ recipients: "{{ setup_alerting_emails }}"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Temporarily remove production email notification rule for {{ prod_alerting_emails }}
+ infinidat.infinibox.infini_notification_rule:
+ name: "production_email"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Configure Single Sign On (SSO)
+ infinidat.infinibox.infini_sso:
+ issuer: http://www.okta.com/exkra32oyyU6KCUCk2p7
+ name: OKTA
+ sign_on_url: https://infinidat.okta.com/app/infinidat_ibox2503_1/exkrwdi7dmXSKdC4l2p7/sso/saml
+ signed_assertion: false
+ signed_response: false
+ signing_certificate: "{{ sso_signing_certificate }}"
+ enabled: true
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Stat Single Sign On (SSO)
+ infinidat.infinibox.infini_sso:
+ name: OKTA
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: sso_stat
+
+ - name: Show sso_stat
+ ansible.builtin.debug:
+ var: sso_stat
+
+ - name: Enable compression
+ infinidat.infinibox.infini_config:
+ config_group: "mgmt"
+ key: "pool.compression_enabled_default"
+ value: true
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Set capacity units
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-base2-units"
+ value: "{{ use_base2_units }}"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Set dataset default provisioning to {{ dataset_default_provisioning }}
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-default-provisioning"
+ value: "{{ dataset_default_provisioning }}"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Configure Infinibox - Set maximum export rows to {{ table_export_limit }}
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-table-export-limit"
+ value: "{{ table_export_limit }}"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Configure Infinibox - Setup Active Directory
+ infinidat.infinibox.infini_users_repository:
+ name: "{{ ldap_name }}"
+ bind_password: "tuFrAxahuYe4"
+ bind_username: "conldap"
+ ad_domain_name: "infinidat.com"
+ repository_type: "ActiveDirectory"
+ schema_group_class: "group"
+ schema_group_memberof_attribute: "memberof"
+ schema_group_name_attribute: "cn"
+ schema_groups_basedn: ""
+ schema_user_class: "user"
+ schema_username_attribute: "sAMAccountName"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Test user login using Active Directory credentials
+ infinidat.infinibox.infini_user:
+ user_name: "admin" # Must be an AD account, not local
+ user_password: "123456"
+ state: "login"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Configure 'CO-ReadOnly' LDAP user group
+ infinidat.infinibox.infini_user:
+ user_ldap_group_name: "CO-ReadOnly"
+ user_ldap_group_dn: "CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com"
+ user_ldap_group_ldap: "{{ ldap_name }}"
+ user_ldap_group_role: "read_only"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # - name: Configure 'CO-StorageAdmin' LDAP user group
+ # infinidat.infinibox.infini_user:
+ # user_ldap_group_name: "CO-StorageAdmin"
+ # user_ldap_group_dn: "CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com"
+ # user_ldap_group_ldap: "{{ ldap_name }}"
+ # user_ldap_group_role: "admin"
+ # state: "present"
+ # user: "{{ user }}"
+ # password: "{{ password }}"
+ # system: "{{ system }}"
+
+ # - name: Configure 'ETS-CommVault' LDAP user group
+ # infinidat.infinibox.infini_user:
+ # user_ldap_group_name: "ETS-CommVault"
+ # user_ldap_group_dn: "CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com"
+ # user_ldap_group_ldap: "{{ ldap_name }}"
+ # user_ldap_group_role: "pool_admin"
+ # user_group_pools: [ "pool-a", "pool-b", "pool-c" ]
+ # state: "present"
+ # user: "{{ user }}"
+ # password: "{{ password }}"
+ # system: "{{ system }}"
+
+ - name: Set up an admin user
+ infinidat.infinibox.infini_user:
+ user_name: "{{ admin_user_name }}"
+ user_email: "{{ admin_user_email }}"
+ user_password: "{{ admin_user_password }}"
+ user_role: "admin"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Remove existing syslog notification rules
+    # Rule removal is required since targets cannot be modified if there are rules that use them
+ infinidat.infinibox.infini_notification_rule:
+ name: "{{ item.rule_name }}"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ loop:
+ "{{ syslogs }}"
+
+ - name: Create syslog notification targets
+ infinidat.infinibox.infini_notification_target:
+ state: "present"
+ name: "{{ item.target_name }}"
+ protocol: "{{ item.protocol }}"
+ host: "{{ item.host }}"
+ port: "{{ item.port }}"
+ facility: "{{ item.facility }}"
+ transport: "{{ item.transport }}"
+ post_test: "{{ item.post_test }}" # Force a dummy event for notification to be posted
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ loop:
+ "{{ syslogs }}"
+
+ - name: Create syslog notification rules
+ infinidat.infinibox.infini_notification_rule:
+ name: "{{ item.rule_name }}"
+ target: "{{ item.target_name }}"
+ event_level: "{{ alerting_event_levels }}"
+ include_events: "{{ alerting_includes }}"
+ exclude_events: "{{ alerting_excludes }}"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ loop:
+ "{{ syslogs }}"
+
+ - name: Remove replication network space named Replication
+ infinidat.infinibox.infini_network_space:
+ name: Replication
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Create replication network space named Replication
+ infinidat.infinibox.infini_network_space:
+ name: Replication
+ state: present
+ interfaces:
+ - 80
+ - 81
+ - 82
+ service: RMR_SERVICE
+ netmask: 16
+ network: 172.20.0.0
+ default_gateway: 172.20.95.254
+ # rate_limit: 8
+ # mtu: 1500
+ # async_only: true
+ ips: # IPs borrowed from https://labs.infinidat.com/gdc/systems/psus-vbox-aws44-1/ip/
+ - 172.20.50.111
+ - 172.20.50.70
+ - 172.20.49.243
+ - 172.20.49.241
+ - 172.20.49.239
+ - 172.20.49.237
+ - 172.20.49.235
+ - 172.20.49.233
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # - name: TODO by David - Configure Infinimetrics - Add Infinibox
+ # ansible.builtin.debug:
+ # msg: (9) Add Infinibox to Infinimetrics
+
+ - name: Create pools
+ infinidat.infinibox.infini_pool:
+ name: "{{ item }}"
+ size: "{{ pool_size }}"
+ vsize: "{{ pool_size }}"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ loop:
+ - pool-a
+ - pool-b
+ - pool-c
+
+ - name: Set up pool admin user for pool
+ infinidat.infinibox.infini_user:
+ user_name: "{{ pool_admin_user_name }}"
+ user_email: "{{ pool_admin_user_email }}"
+ user_password: "{{ pool_admin_user_password }}"
+ user_role: "pool_admin"
+ user_pool: "pool-a"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Update a fibre channel switch label
+ infinidat.infinibox.infini_fibre_channel_switch:
+ switch_name: VSAN 100
+ new_switch_name: Switch1000
+ state: "rename"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: switch_label_result
+ failed_when: > # WARNING: This should be removed if the array has FC configured
+ ("Cannot find switch" not in switch_label_result.msg)
+
+ - name: Install SSL certificate
+ infinidat.infinibox.infini_certificate:
+ certificate_file_name: /home/stack/workspace/ansible-infinidat-collection/signed-certificate-with-pkey.pem
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Pause for a short period since the SSL certificate was updated
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Stat SSL certificate
+ infinidat.infinibox.infini_certificate:
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: cert_out
+
+ - name: Show SSL stat
+ ansible.builtin.debug:
+ msg: "{{ cert_out }}"
+
+ - name: Create production email notification rule production_email for addresses {{ prod_alerting_emails }}
+ infinidat.infinibox.infini_notification_rule:
+ name: "production_email"
+ event_level: "{{ alerting_event_levels }}"
+ include_events: "{{ alerting_includes }}"
+ exclude_events: "{{ alerting_excludes }}"
+ recipients: "{{ prod_alerting_emails }}"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Configure Infinibox - Post event that Infinibox configuration is complete
+ infinidat.infinibox.infini_event:
+ description_template: Infinibox {{ system }} configuration is complete
+ level: INFO
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Remove temporary setup email notification rule for {{ setup_alerting_emails }}
+ infinidat.infinibox.infini_notification_rule:
+ name: "setup_email"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml
index 54978a53e..58f6e3194 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_runtest.yml
@@ -1,9 +1,8 @@
---
### Localhost
-- hosts: forensics
+- name: Run Infinisafe Demo
+ hosts: forensics
gather_facts: false
- collections:
- - infinidat.infinibox
vars:
network_space: InfiniSafe-Fenced-Network # iSCSI
service: ISCSI_SERVICE
@@ -17,8 +16,8 @@
ibox_iqn: iqn.2009-11.com.infinidat:storage:infinibox-sn-1521
tasks:
- - name: Create {{ service }} network space named {{ network_space }}
- infini_network_space:
+ - name: Create network space
+ infinidat.infinibox.infini_network_space:
name: "{{ network_space }}"
state: present
service: "{{ service }}"
@@ -44,8 +43,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Create forensic host {{ host }}
- infini_host:
+ - name: Create forensic host
+ infinidat.infinibox.infini_host:
name: "{{ host }}"
state: present
@@ -54,8 +53,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Map snapshot {{ snap }} to host {{ host }}
- infini_map:
+ - name: Map snapshot to host
+ infinidat.infinibox.infini_map:
host: "{{ host }}"
volume: "{{ snap }}"
state: present
@@ -65,8 +64,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Add port to host {{ host }}
- infini_port:
+ - name: Add port to host
+ infinidat.infinibox.infini_port:
host: "{{ host }}"
iqns: "{{ host_iqn }}"
state: present
@@ -77,7 +76,7 @@
delegate_to: localhost
### Forensics Host
- - name: Connect forensics host {{ host }} to Infinibox {{ ibox }}
+ - name: Connect forensics host to Infinibox
ansible.builtin.shell: |
iscsiadm --mode discoverydb --type sendtargets --portal {{ ibox_portal }} --discover
iscsiadm --mode node --targetname={{ ibox_iqn }} --op update --name=node.session.auth.username --value={{ user }}
@@ -87,28 +86,28 @@
changed_when: false
become: true
- # Run forensic tests on snapshot {{ snap }}
- - name: Forensically test snapshot {{ snap }} is clean using host {{ host }}
+ # Run forensic tests on snapshot
+ - name: Forensically test snapshot is clean using host
ansible.builtin.shell: |
true
changed_when: false
register: is_snapshot_clean
### Localhost
- - name: debug
+ - name: Debug
ansible.builtin.debug:
msg: Snapshot {{ snap }} PASSED testing
when: is_snapshot_clean.rc == 0
delegate_to: localhost
- - name: debug
+ - name: Debug
ansible.builtin.debug:
msg: Snapshot {{ snap }} FAILED testing. Do not use this snapshot.
when: is_snapshot_clean.rc != 0
delegate_to: localhost
- - name: Restoring volume {{ volume }} from known clean snapshot {{ snap }}
- infini_vol:
+ - name: Restoring volume from known clean snapshot
+ infinidat.infinibox.infini_vol:
name: "{{ snap }}"
state: present
parent_volume_name: "{{ volume }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml
index 5d58b9741..ae82f1d22 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_setup.yml
@@ -1,8 +1,7 @@
---
-- hosts: localhost
+- name: Setup for Infinisafe demo
+ hosts: localhost
gather_facts: true # Required for ansible_date_time
- collections:
- - infinidat.infinibox
vars:
network_space: InfiniSafe-Fenced-Network # iSCSI
service: ISCSI_SERVICE
@@ -13,8 +12,8 @@
host_iqn: iqn.1993-08.org.debian:01:62ebda3b76cc # io-wt-35
tasks:
- - name: Create pool {{ pool }}
- infini_pool:
+ - name: Create pool
+ infinidat.infinibox.infini_pool:
name: "{{ pool }}"
size: 1TB
vsize: 1TB
@@ -24,8 +23,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: Create volume {{ volume }} under pool {{ pool }}
- infini_vol:
+ - name: Create volume in pool
+ infinidat.infinibox.infini_vol:
name: "{{ volume }}"
size: 1GB
pool: "{{ pool }}"
@@ -35,8 +34,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: Create and lock (1 minute) snapshot {{ snap }} from volume {{ volume }}
- infini_vol:
+ - name: Create and lock (1 minute) snapshot from volume
+ infinidat.infinibox.infini_vol:
name: "{{ snap }}"
state: present
volume_type: snapshot
diff --git a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml
index 6213f6c08..374916ce5 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/infinisafe_demo_teardown.yml
@@ -1,9 +1,8 @@
---
### Localhost
-- hosts: forensics
+- name: Teardown Infinisafe demo
+ hosts: forensics
gather_facts: false
- collections:
- - infinidat.infinibox
vars:
network_space: InfiniSafe-Fenced-Network # iSCSI
service: ISCSI_SERVICE
@@ -18,8 +17,8 @@
ibox_portals: 172.31.32.148 172.31.32.146 172.31.32.149 172.31.32.145 172.31.32.150 172.31.32.147
tasks:
- - name: Unmap snapshot {{ snap }} from host {{ host }}
- infini_map:
+ - name: Unmap snapshot from host
+ infinidat.infinibox.infini_map:
host: "{{ host }}"
volume: "{{ snap }}"
state: absent
@@ -29,8 +28,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Remove port from host {{ host }}
- infini_port:
+ - name: Remove port from host
+ infinidat.infinibox.infini_port:
host: "{{ host }}"
iqns: "{{ host_iqn }}"
state: absent
@@ -41,7 +40,7 @@
delegate_to: localhost
### Forensics Host
- - name: Disconnect forensics host {{ host }} from Infinibox {{ ibox }}
+ - name: Disconnect forensics host from Infinibox
ansible.builtin.shell: |
for i in {{ ibox_portals }}; do
iscsiadm --mode node --target {{ ibox_iqn }} -p $i --logout
@@ -53,8 +52,8 @@
become: true
### Localhost
- - name: Remove network space named {{ network_space }}
- infini_network_space:
+ - name: Remove network space
+ infinidat.infinibox.infini_network_space:
name: "{{ network_space }}"
state: absent
@@ -63,8 +62,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Remove snapshot {{ snap }} created from volume {{ volume }}
- infini_vol:
+ - name: Remove snapshot created from volume
+ infinidat.infinibox.infini_vol:
name: "{{ snap }}"
state: absent
@@ -73,8 +72,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Remove volume {{ volume }} under pool {{ pool }}
- infini_vol:
+ - name: Remove volume from pool
+ infinidat.infinibox.infini_vol:
name: "{{ volume }}"
pool: "{{ pool }}"
state: absent
@@ -84,8 +83,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Remove pool {{ pool }}
- infini_pool:
+ - name: Remove pool
+ infinidat.infinibox.infini_pool:
name: "{{ pool }}"
state: absent
@@ -94,8 +93,8 @@
system: "{{ system }}"
delegate_to: localhost
- - name: Remove forensic host {{ host }}
- infini_host:
+ - name: Remove forensic host
+ infinidat.infinibox.infini_host:
name: "{{ host }}"
state: absent
diff --git a/ansible_collections/infinidat/infinibox/playbooks/inventory b/ansible_collections/infinidat/infinibox/playbooks/inventory
index 15954ef41..bdf610702 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/inventory
+++ b/ansible_collections/infinidat/infinibox/playbooks/inventory
@@ -1,2 +1,16 @@
+#[all]
+#ansible_python_interpreter: "~/workspace/ansible-infinidat-collection/venv/bin/python"
+
[forensics]
io-wt-35.lab.wt.us.infinidat.com ansible_python_interpreter=python3.8
+
+[testers]
+# Defined in /etc/hosts
+exng-centos8 # 172.31.88.153
+exng-jammy # 172.31.88.177
+
+[tester_centos]
+exng-centos8 # 172.31.88.153
+
+[tester_jammy]
+exng-jammy # 172.31.88.177
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_config_sample.yml b/ansible_collections/infinidat/infinibox/playbooks/test_config_sample.yml
new file mode 100644
index 000000000..7159c61a3
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_config_sample.yml
@@ -0,0 +1,63 @@
+---
+# PSDEV-1108: Playbook for testing configuration settings using the infini_config module.
+- name: Test infini_config module
+ hosts: localhost
+  gather_facts: false  # Facts such as ansible_date_time are not used by this play
+ tasks:
+
+ # Test any object_type
+
+    - name: Set compression setting to false
+ infinidat.infinibox.infini_config:
+ config_group: "mgmt"
+ key: "pool.compression_enabled_default"
+ value: false
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
+
+ - name: Check current compression setting
+ infinidat.infinibox.infini_config:
+ config_group: "mgmt"
+ key: "pool.compression_enabled_default"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
+
+
+ - name: Check 2 base
+ infinidat.infinibox.infini_config:
+ config_group: "mgmt"
+ key: "mgmt.is_decimal_capacity_converter" # True for Base 10, False for Base 2
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
+
+ - name: Set Dataset capacity unit to Base 2
+ infinidat.infinibox.infini_config:
+ config_group: "mgmt"
+ key: "mgmt.is_decimal_capacity_converter" # True for Base 10, False for Base 2
+ value: false
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml
index cf807fcbb..84c2fed4e 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_map_cluster.yml
@@ -1,12 +1,11 @@
---
-- hosts: localhost
+- name: Test infini_map module
+ hosts: localhost
gather_facts: false
- collections:
- - infinidat.infinibox
tasks:
- - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol specifying both a cluster and a host
- infini_map:
+ - name: NEGATIVE test -> Map volume specifying both a cluster and a host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
@@ -19,8 +18,8 @@
- '"both to be provided" not in result.msg'
- not result.failed
- - name: NEGATIVE test -> Unmap volume {{ auto_prefix }}vol specifying both a cluster and a host
- infini_map:
+ - name: NEGATIVE test -> Unmap volume specifying both a cluster and a host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
@@ -33,8 +32,8 @@
- '"both to be provided" not in result.msg'
- not result.failed
- - name: NEGATIVE test -> Stat volume {{ auto_prefix }}vol specifying both a cluster and a host
- infini_map:
+ - name: NEGATIVE test -> Stat volume specifying both a cluster and a host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
@@ -47,8 +46,8 @@
- '"both to be provided" not in result.msg'
- not result.failed
- - name: POSITIVE test -> Create pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Create pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
size: 1TB
vsize: 1TB
@@ -57,8 +56,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Create thin volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -67,32 +66,32 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create host {{ auto_prefix }}host1 for cluster mapping
- infini_host:
+    - name: POSITIVE test -> Create first host for cluster mapping
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host1"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create host {{ auto_prefix }}host2 for cluster mapping
- infini_host:
+    - name: POSITIVE test -> Create second host for cluster mapping
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host2"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create host {{ auto_prefix }}host3 for host mapping
- infini_host:
+ - name: POSITIVE test -> Create host for host mapping
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host3"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create cluster {{ auto_prefix }}cluster with hosts 1 and 2 for cluster mapping
- infini_cluster:
+ - name: POSITIVE test -> Create cluster with hosts for cluster mapping
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host1"
@@ -104,8 +103,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: POSITIVE test -> Map volume to cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: present
@@ -113,8 +112,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster again
- infini_map:
+ - name: IDEMPOTENT test -> Map volume to cluster again
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: present
@@ -126,8 +125,8 @@
- '"already exists using" not in result.msg'
- result.changed
- - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: POSITIVE test -> Stat map of volume to cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -140,8 +139,8 @@
ansible.builtin.debug:
msg: "Map stat: {{ cluster_stat }}"
- - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3
- infini_map:
+ - name: POSITIVE test -> Map volume to host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: present
@@ -149,8 +148,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 again
- infini_map:
+ - name: IDEMPOTENT test -> Map volume to host again
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: present
@@ -162,8 +161,8 @@
- '"already exists using" not in result.msg'
- result.changed
- - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3
- infini_map:
+ - name: POSITIVE test -> Stat map of volume to host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -177,8 +176,8 @@
msg: "Map stat: {{ host_stat }}"
# Since the host is in the cluster and the volume is already mapped to the cluster, mapping the volume to the host becomes a NOOP.
- - name: IDEMPOTENT test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host1 which is also in {{ auto_prefix }}cluster
- infini_map:
+ - name: IDEMPOTENT test -> Map volume to host which is also in cluster
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
volume: "{{ auto_prefix }}vol"
state: present
@@ -191,8 +190,8 @@
- result.changed
### Test mapping of volume to a host using specified lun.
- - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 again using lun 99 when already mapped
- infini_map:
+ - name: NEGATIVE test -> Map volume to host again using lun 99 when already mapped
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
lun: 99
@@ -205,8 +204,8 @@
- '"Cannot change the lun" not in new_lun_fail.msg'
- result.changed
- - name: POSITIVE test -> Unmap volume {{ auto_prefix }}vol from host {{ auto_prefix }}host3
- infini_map:
+ - name: POSITIVE test -> Unmap volume from host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -214,8 +213,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 using lun 99 when not already mapped
- infini_map:
+ - name: POSITIVE test -> Map volume to host using lun 99 when not already mapped
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
lun: 99
@@ -224,8 +223,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host3 to check lun 99 used
- infini_map:
+ - name: POSITIVE test -> Stat map of volume to host to check lun 99 used
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -243,8 +242,8 @@
### End test mapping of volume to a host using specified lun.
### Test mapping of volume to a cluster using specified lun.
- - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster again using lun 98 when already mapped
- infini_map:
+ - name: NEGATIVE test -> Map volume to cluster again using lun 98 when already mapped
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
lun: 98
@@ -257,8 +256,8 @@
- '"Cannot change the lun" not in new_lun_fail.msg'
- result.changed
- - name: POSITIVE test -> Unmap volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: POSITIVE test -> Unmap volume from cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -266,8 +265,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster using lun 98 when not already mapped
- infini_map:
+ - name: POSITIVE test -> Map volume to cluster using lun 98 when not already mapped
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
lun: 98
@@ -276,8 +275,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster to check lun 98 used
- infini_map:
+ - name: POSITIVE test -> Stat map of volume to cluster to check lun 98 used
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: stat
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_metadata.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_metadata.yml
new file mode 100644
index 000000000..3ba928321
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_metadata.yml
@@ -0,0 +1,813 @@
+---
+# PSDEV-1108: Playbook for testing creation of metadata resources.
+- name: Test infini_metadata module
+ hosts: localhost
+  gather_facts: false  # Facts such as ansible_date_time are not used by this play
+ tasks:
+
+ # Test any object_type
+
+ - name: NEGATIVE test -> Attempt to create a volume metadata key without providing a value
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "foo"
+ key: "foo"
+ # value: "bar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot create a' not in metadata_out.msg"
+ - "'without providing a value' not in metadata_out.msg"
+
+ - name: NEGATIVE test -> Attempt to create a system metadata key while incorrectly providing an object_name
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ object_name: "foo" # Error
+ key: "foo"
+ value: "bar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'object_name for object_type system must not be provided' not in metadata_out.msg"
+
+    - name: NEGATIVE test -> Attempt to create a volume metadata key without providing an object_name
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ # object_name: "foo"
+ key: "foo"
+ value: "bar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'The name of the vol must be provided as object_name' not in metadata_out.msg"
+
+ # Test pool object_type
+
+ - name: SETUP test -> Delete pool to clear any metadata
+ infinidat.infinibox.infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: SETUP test -> Create pool
+ infinidat.infinibox.infini_pool:
+ name: "{{ auto_prefix }}pool"
+ size: "{{ pool_size }}"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get pool's nonexistent metadata key named poolfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find pool metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create metadata for pool with key named poolfoo with value poolbar
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ value: "poolbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create metadata for pool with key named poolfoo with value poolbar again
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ value: "poolbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat metadata pool key named poolfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "metadata_out['value'] != 'poolbar' or metadata_out['changed'] != false"
+
+ - name: POSITIVE test -> Delete metadata pool key named poolfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Delete metadata pool key named poolfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Stat nonexistent metadata pool key named poolfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "pool"
+ object_name: "{{ auto_prefix }}pool"
+ key: "poolfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find pool metadata key' not in metadata_out.msg"
+
+ # Test vol object_type
+
+ - name: SETUP test -> Delete volume in pool to clear any metadata
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: SETUP test -> Create volume in pool
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ size: 1 GB
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get volume's nonexistent metadata key named volfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find vol metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create metadata for volume with key named volfoo with value volbar
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ value: "volbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create metadata for volume with key named volfoo with value volbar again
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ value: "volbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat metadata volume key named volfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "metadata_out['value'] != 'volbar' or metadata_out['changed'] != false"
+
+ - name: POSITIVE test -> Delete metadata volume key named volfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Delete metadata volume key named volfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Stat nonexistent metadata volume key named volfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find vol metadata key' not in metadata_out.msg"
+
+ # Test vol-snap object_type
+
+ - name: SETUP test -> Delete volume snapshot in pool to clear any metadata
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}volsnap"
+ pool: "{{ auto_prefix }}pool"
+ volume_type: "snapshot"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Stat nonexistent metadata volume snapshot key named volsnapfoo when snapshot does not exist
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Volume snapshot named' not in metadata_out.msg or 'not found' not in metadata_out.msg"
+
+ - name: SETUP test -> Create volume in pool
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ size: 1 GB
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: SETUP test -> Create volume snapshot in pool
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}volsnap"
+ volume_type: "snapshot"
+ parent_volume_name: "{{ auto_prefix }}vol"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get volume snapshot's nonexistent metadata key named volsnapfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find vol-snap metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create metadata for volume snapshot with key named volsnapfoo with value volsnapbar
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ value: "volsnapbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create metadata for volume snapshot with key named volsnapfoo with value volsnapbar again
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ value: "volsnapbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat metadata volume snapshot key named volsnapfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "metadata_out['value'] != 'volsnapbar' or metadata_out['changed'] != false"
+
+ - name: POSITIVE test -> Delete metadata volume snapshot key named volsnapfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Delete metadata volume snapshot key named volsnapfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Stat nonexistent metadata volume snapshot key named volsnapfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol-snap"
+ object_name: "{{ auto_prefix }}volsnap"
+ key: "volsnapfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find vol-snap metadata key' not in metadata_out.msg"
+
+ # Test system object_type
+
+    - name: NEGATIVE test -> Attempt to set system metadata key ui-dataset-base2-units to something other than a boolean
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-base2-units"
+ value: "bar" # Should be a boolean
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Value must be able to be decoded as a boolean' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Delete metadata system key named sysfoo to clear any metadata
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get nonexistent system metadata key named sysfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Metadata for system with key sysfoo not found' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create metadata system key named sysfoo with value sysbar
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ value: "sysbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create metadata system key named sysfoo with value sysbar again
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ value: "sysbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat metadata system key named sysfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+
+ - name: POSITIVE test -> Delete metadata system key named sysfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Delete metadata system key named sysfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat metadata system key named ui-dataset-default-provisioning
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-default-provisioning"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create metadata system key named ui-dataset-default-provisioning with value THICK
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-default-provisioning"
+ value: "THICK"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat metadata system key named ui-dataset-default-provisioning
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-default-provisioning"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'THICK' != metadata_out['value']"
+
+ - name: POSITIVE test -> Create metadata system key named ui-dataset-default-provisioning with value THIN
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-default-provisioning"
+ value: "THIN"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Negative test -> Attempt to create metadata system key named ui-dataset-default-provisioning with value invalid_value
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-dataset-default-provisioning"
+ value: "invalid_value"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot create system metadata' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create metadata system key named ui-table-export-limit with value 2000
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-table-export-limit"
+ value: 2000
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Attempt to create metadata system key named ui-table-export-limit with value invalid_value
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "ui-table-export-limit"
+ value: "invalid_value"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Value must be of type integer' not in metadata_out.msg"
+
+
+ # Test fs object_type
+
+ - name: SETUP test -> Delete fs to clear any metadata
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: SETUP test -> Create fs
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs"
+ pool: "{{ auto_prefix }}pool"
+ size: 1 GB
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get fs's nonexistent metadata key named fsfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "fs"
+ object_name: "{{ auto_prefix }}fs"
+ key: "fsfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find fs metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create fs's metadata key named fsfoo with value fsbar
+ infinidat.infinibox.infini_metadata:
+ object_type: "fs"
+ object_name: "{{ auto_prefix }}fs"
+ key: "fsfoo"
+ value: "fsbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create fs's metadata key named fsfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "fs"
+ object_name: "{{ auto_prefix }}fs"
+ key: "fsfoo"
+ value: "fsbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # Test fs-snap object_type
+
+ - name: SETUP test -> Delete fs snapshot to clear any metadata
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fssnap"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Create fs snapshot but use default fs_type, master, with a parent_fs_name
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fssnap"
+ pool: "{{ auto_prefix }}pool"
+ parent_fs_name: "{{ auto_prefix }}fs"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'parent_fs_name should not be specified if fs_type is' not in metadata_out.msg"
+
+ - name: SETUP test -> Create fs snapshot
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fssnap"
+ pool: "{{ auto_prefix }}pool"
+ fs_type: "snapshot"
+ parent_fs_name: "{{ auto_prefix }}fs"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get fs snapshot's nonexistent metadata key named fssnapfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "fs-snap"
+ object_name: "{{ auto_prefix }}fssnap"
+ key: "fssnapfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find fs-snap metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create fs snapshot's metadata key named fssnapfoo with value fssnapbar
+ infinidat.infinibox.infini_metadata:
+ object_type: "fs-snap"
+ object_name: "{{ auto_prefix }}fssnap"
+ key: "fssnapfoo"
+ value: "fssnapbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create fs snapshot's metadata key named fssnapfoo with value fssnapbar again
+ infinidat.infinibox.infini_metadata:
+ object_type: "fs-snap"
+ object_name: "{{ auto_prefix }}fssnap"
+ key: "fssnapfoo"
+ value: "fssnapbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # Test cluster object_type
+
+ - name: SETUP test -> Delete cluster to clear any metadata
+ infinidat.infinibox.infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: SETUP test -> Create cluster
+ infinidat.infinibox.infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get cluster's nonexistent metadata key named clusterfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "cluster"
+ object_name: "{{ auto_prefix }}cluster"
+ key: "clusterfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find cluster metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create cluster's metadata key named clusterfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "cluster"
+ object_name: "{{ auto_prefix }}cluster"
+ key: "clusterfoo"
+ value: "clusterbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create cluster's metadata key named clusterfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "cluster"
+ object_name: "{{ auto_prefix }}cluster"
+ key: "clusterfoo"
+ value: "clusterbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Delete cluster's metadata key named clusterfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "cluster"
+ object_name: "{{ auto_prefix }}cluster"
+ key: "clusterfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Delete cluster's metadata key named clusterfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "cluster"
+ object_name: "{{ auto_prefix }}cluster"
+ key: "clusterfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # Test host object_type
+
+ - name: SETUP test -> Delete host to clear any metadata
+ infinidat.infinibox.infini_host:
+ name: "{{ auto_prefix }}host"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: SETUP test -> Create host
+ infinidat.infinibox.infini_host:
+ name: "{{ auto_prefix }}host"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: NEGATIVE test -> Get host's nonexistent metadata key named hostfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "host"
+ object_name: "{{ auto_prefix }}host"
+ key: "hostfoo"
+ state: "stat"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+ failed_when:
+ - "'Cannot find host metadata key' not in metadata_out.msg"
+
+ - name: POSITIVE test -> Create host's metadata key named hostfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "host"
+ object_name: "{{ auto_prefix }}host"
+ key: "hostfoo"
+ value: "hostbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Create host's metadata key named hostfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "host"
+ object_name: "{{ auto_prefix }}host"
+ key: "hostfoo"
+ value: "hostbar"
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Delete host's metadata key named hostfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "host"
+ object_name: "{{ auto_prefix }}host"
+ key: "hostfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: IDEMPOTENT test -> Delete host's metadata key named hostfoo again
+ infinidat.infinibox.infini_metadata:
+ object_type: "host"
+ object_name: "{{ auto_prefix }}host"
+ key: "hostfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml
index b8bced550..16c880bfd 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_network_spaces.yml
@@ -1,16 +1,15 @@
---
-- hosts: localhost
+- name: Test infini_network_space module
+ hosts: localhost
gather_facts: false
- collections:
- - infinidat.infinibox
vars:
- name: iSCSI
+ service_name: iSCSI
service: ISCSI_SERVICE
tasks:
- - name: POSITIVE test -> Create {{ service }} network space named {{ name }}
- infini_network_space:
- name: "{{ name }}"
+ - name: POSITIVE test -> Create network space
+ infinidat.infinibox.infini_network_space:
+ name: "{{ service_name }}"
state: present
interfaces:
- 1680
@@ -34,13 +33,13 @@
system: "{{ system }}"
register: create_out
- - name: debug
+ - name: Debug
ansible.builtin.debug:
var: create_out
- - name: POSITIVE test -> Stat {{ service }} network space named {{ name }}
- infini_network_space:
- name: "{{ name }}"
+ - name: POSITIVE test -> Stat network space
+ infinidat.infinibox.infini_network_space:
+ name: "{{ service_name }}"
state: stat
user: "{{ user }}"
password: "{{ password }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml
index 3ca9b5542..715b83284 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_resources.yml
@@ -1,12 +1,11 @@
---
-- hosts: localhost
+- name: Test Create Resources
+ hosts: localhost
gather_facts: true # Required for ansible_date_time
- collections:
- - infinidat.infinibox
tasks:
- - name: NEGATIVE test -> Create file system named {{ auto_prefix }}fs under a pool that does not exist
- infini_fs:
+ - name: NEGATIVE test -> Create file system under a pool that does not exist
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool_missing"
@@ -18,27 +17,27 @@
failed_when: "'pool_missing' not in result.msg"
when: not ansible_check_mode
- - name: POSITIVE test -> Create pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Create pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
- size: 1TB
- vsize: 1TB
+ size: "{{ pool_size }}"
+ vsize: "{{ pool_size }}"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Stat pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create default THIN, file system named {{ auto_prefix }}fs_default under pool {{ auto_prefix }}pool
+ - name: POSITIVE test -> Create default THIN file system
# See IBox settings: Dataset default provisioning.
- infini_fs:
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs_default"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -47,8 +46,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat file system named {{ auto_prefix }}fs_default under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Stat file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs_default"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -58,11 +57,12 @@
system: "{{ system }}"
register: def_stat
- - debug:
+ - name: DEBUG test -> Show file system stat
+ ansible.builtin.debug:
msg: stat - {{ def_stat }}
- - name: POSITIVE test -> Create THICK file system named {{ auto_prefix }}fs_thick under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Create THICK file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs_thick"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -72,8 +72,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create THIN file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Create THIN file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -83,8 +83,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Change THIN file system named {{ auto_prefix }}fs to THICK provisioning
- infini_fs:
+ - name: POSITIVE test -> Change THIN file system to THICK provisioning
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -94,8 +94,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Change THICK file system named {{ auto_prefix }}fs back to THIN provisioning
- infini_fs:
+ - name: POSITIVE test -> Change THICK file system back to THIN provisioning
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -105,17 +105,154 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Stat file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
pool: "{{ auto_prefix }}pool"
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
+ register: output
+
+ - name: DEBUG test -> Show file system stat
+ ansible.builtin.debug:
+ var: output
+
+ - name: POSITIVE test -> Stat file system that matches serial number
+ infinidat.infinibox.infini_fs:
+ serial: "{{ output.serial }}"
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: stat_out
+
+ - name: DEBUG test -> Show file system stat
+ ansible.builtin.debug:
+ var: stat_out
+
+ - name: POSITIVE test -> Create file system snapshot
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap"
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ fs_type: snapshot
+ parent_fs_name: "{{ auto_prefix }}fs"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Restore parent file system from snapshot
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap"
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ fs_type: snapshot
+ parent_fs_name: "{{ auto_prefix }}fs"
+ restore_fs_from_snapshot: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: DEBUG test -> Find the current date-time. Time marks when the playbook gathered facts
+ ansible.builtin.debug:
+ var: ansible_date_time.iso8601_micro
+
+ # Note: For collection filters, Ansible does not honor the
+ # collections list at the top of this file.
+ # One must use a FQCN for filters such as
+ # infinidat.infinibox.delta_time.
+ - name: POSITIVE test -> Create snapshot from file system. Lock for 2 minutes.
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap_locked"
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ fs_type: snapshot
+ parent_fs_name: "{{ auto_prefix }}fs"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=2) }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: POSITIVE test -> Extend lock to 3 minutes without refresh
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap_locked"
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ fs_type: snapshot
+ parent_fs_name: "{{ auto_prefix }}fs"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_only: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to extend file system snapshot lock without refresh on a file system snapshot that does not exist.
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap_locked_missing"
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ fs_type: snapshot
+ parent_fs_name: "{{ auto_prefix }}fs"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_only: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('Snapshot does not exist. Cannot comply')"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to create file system snapshot locked for 31 days. Exceeds 30 day maximum lock time enforced by infini_fs module (not API)
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap_locked_too_long"
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ fs_type: snapshot
+ parent_fs_name: "{{ auto_prefix }}fs"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(days=31) }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "not result.msg | regex_search('snapshot_lock_expires_at exceeds.*days in the future')"
+ vars:
+ fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ - name: NEGATIVE test -> Attempt to remove locked file system snapshot
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap_locked"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: result
+ failed_when: "'Cannot delete snapshot. Locked' not in result.msg"
- - name: POSITIVE test -> Export file system {{ auto_prefix }}fs
- infini_export:
+ - name: POSITIVE test -> Wait for lock on file system snapshot to expire
+ ansible.builtin.pause:
+ seconds: 181
+ prompt: Waiting for {{ auto_prefix }}fs_snap_locked to expire
+
+ - name: POSITIVE test -> Remove file system snapshot with expired lock
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs_snap_locked"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Export file system
+ infinidat.infinibox.infini_export:
name: "/{{ auto_prefix }}export"
filesystem: "{{ auto_prefix }}fs"
client_list:
@@ -127,8 +264,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat file system export {{ auto_prefix }}export
- infini_export:
+ - name: POSITIVE test -> Stat file system export
+ infinidat.infinibox.infini_export:
name: "/{{ auto_prefix }}export"
filesystem: "{{ auto_prefix }}fs"
state: stat
@@ -136,8 +273,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: NEGATIVE test -> Export file system {{ auto_prefix }}fs that does not exist
- infini_export:
+ - name: NEGATIVE test -> Export file system that does not exist
+ infinidat.infinibox.infini_export:
name: "/{{ auto_prefix }}export_missing"
filesystem: "{{ auto_prefix }}fs_missing"
client_list:
@@ -152,8 +289,8 @@
failed_when: "not result.msg | regex_search('File system.*not found')"
when: not ansible_check_mode
- - name: POSITIVE test -> Create export client for export /{{ auto_prefix }}export
- infini_export_client:
+ - name: POSITIVE test -> Create export client for export
+ infinidat.infinibox.infini_export_client:
export: "/{{ auto_prefix }}export"
client: "20.20.20.20"
state: present
@@ -165,7 +302,7 @@
# TODO - stat export client
- name: NEGATIVE test -> Create export client for export that does not exist
- infini_export_client:
+ infinidat.infinibox.infini_export_client:
export: "/{{ auto_prefix }}export_missing"
client: 20.20.20.20
state: present
@@ -177,8 +314,8 @@
failed_when: "not result.msg | regex_search('Export.*not found')"
when: not ansible_check_mode
- - name: NEGATIVE test -> Create volume {{ auto_prefix }}vol_in_pool_missing under pool that does not exist
- infini_vol:
+ - name: NEGATIVE test -> Create volume under pool that does not exist
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_in_pool_missing"
size: 1GB
pool: "{{ auto_prefix }}pool_missing"
@@ -191,8 +328,8 @@
# failed_when: "'pool_missing' not in result.msg"
when: not ansible_check_mode
- - name: POSITIVE test -> Create thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Create thin volume in existing pool
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -201,8 +338,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create thick volume {{ auto_prefix }}vol_thick under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Create thick volume under pool
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_thick"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -213,26 +350,31 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- register: output
- infini_vol:
+ - name: POSITIVE test -> Stat volume under pool
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
pool: "{{ auto_prefix }}pool"
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
+ register: output
- name: POSITIVE test -> Stat vol serial number
- infini_vol:
+ infinidat.infinibox.infini_vol:
state: stat
serial: "{{ output.serial }}"
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
+ register: stat_out
+
+ - name: DEBUG test -> Show volume stat with serial number
+ ansible.builtin.debug:
+ var: stat_out
- - name: POSITIVE test -> Create snapshot {{ auto_prefix }}vol_snap from volume {{ auto_prefix }}vol
- infini_vol:
+ - name: POSITIVE test -> Create snapshot from volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap"
state: present
volume_type: snapshot
@@ -241,34 +383,34 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: DEBUG test -> Find the current date-time (WARNING - Time marks when the playbook gathered facts)
+ - name: DEBUG test -> Find the current date-time. Time marks when the playbook gathered facts.
ansible.builtin.debug:
- var=ansible_date_time.iso8601_micro
+ var: ansible_date_time.iso8601_micro
# Note: For collection filters, Ansible does not honor the
# collections list at the top of this file.
# One must use a FQCN for filters such as
# infinidat.infinibox.delta_time.
- - name: POSITIVE test -> Create and lock for 2 minutes snapshot {{ auto_prefix }}vol_snap_locked from volume {{ auto_prefix }}vol
- infini_vol:
+ - name: POSITIVE test -> Create and lock for 2 minutes snapshot from volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: present
volume_type: snapshot
parent_volume_name: "{{ auto_prefix }}vol"
- snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=2) }}"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=2) }}"
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
vars:
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- - name: POSITIVE test -> Extend lock to 3 minutes for {{ auto_prefix }}vol_snap_locked without refresh
- infini_vol:
+ - name: POSITIVE test -> Extend lock to 3 minutes without refresh
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: present
volume_type: snapshot
parent_volume_name: "{{ auto_prefix }}vol"
- snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
snapshot_lock_only: true
user: "{{ user }}"
password: "{{ password }}"
@@ -277,12 +419,12 @@
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- name: NEGATIVE test -> Attempt to extend lock without refresh on a snapshot that does not exist.
- infini_vol:
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked_missing"
state: present
volume_type: snapshot
parent_volume_name: "{{ auto_prefix }}vol"
- snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
+ snapshot_lock_expires_at: "{{ ansible_date_time.iso8601_micro | to_datetime(fmt) | infinidat.infinibox.delta_time(minutes=3) }}"
snapshot_lock_only: true
user: "{{ user }}"
password: "{{ password }}"
@@ -292,8 +434,8 @@
vars:
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- - name: NEGATIVE test -> Attempt to create snapshot locked for 31 days, 31 days exceeds 30 day maximum lock time enforced by infini_vol module (not api)
- infini_vol:
+ - name: NEGATIVE test -> Attempt to create snapshot locked for 31 days, 31 days exceeds 30 day maximum lock time enforced by infini_vol module (not API)
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked_too_long"
state: present
volume_type: snapshot
@@ -307,8 +449,8 @@
vars:
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- - name: NEGATIVE test -> Attempt to remove locked snapshot {{ auto_prefix }}vol_snap_locked
- infini_vol:
+ - name: NEGATIVE test -> Attempt to remove locked volume snapshot
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: absent
user: "{{ user }}"
@@ -317,21 +459,21 @@
register: result
failed_when: "'Cannot delete snapshot. Locked' not in result.msg"
- - name: POSITIVE test -> Wait for lock on {{ auto_prefix }}vol_snap_locked to expire
+ - name: POSITIVE test -> Wait for lock on volume snapshot to expire
ansible.builtin.pause:
seconds: 181
prompt: Waiting for {{ auto_prefix }}vol_snap_locked to expire
- - name: POSITIVE test -> Remove snapshot {{ auto_prefix }}vol_snap_locked with expired lock
- infini_vol:
+ - name: POSITIVE test -> Remove snapshot with expired lock
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create host {{ auto_prefix }}host
- infini_host:
+ - name: POSITIVE test -> Create host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host"
state: present
user: "{{ user }}"
@@ -339,24 +481,24 @@
system: "{{ system }}"
# Second host used for testing infini_cluster
- - name: POSITIVE test -> Create host {{ auto_prefix }}host2
- infini_host:
+ - name: POSITIVE test -> Create second host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host2"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat host {{ auto_prefix }}host
- infini_host:
+ - name: POSITIVE test -> Stat host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host"
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Add one wwns port to {{ auto_prefix }}host
- infini_port:
+ - name: POSITIVE test -> Add one wwns port to host
+ infinidat.infinibox.infini_port:
host: PSUS_ANSIBLE_host
state: present
wwns:
@@ -366,8 +508,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Add a second wwn port plus two iscsi ports to {{ auto_prefix }}host
- infini_port:
+ - name: POSITIVE test -> Add a second wwn port plus two iscsi ports to host
+ infinidat.infinibox.infini_port:
host: PSUS_ANSIBLE_host
state: present
wwns:
@@ -379,24 +521,24 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat ports on {{ auto_prefix }}host
- infini_port:
+ - name: POSITIVE test -> Stat ports on host
+ infinidat.infinibox.infini_port:
host: PSUS_ANSIBLE_host
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat host {{ auto_prefix }}host after ports added
- infini_host:
+ - name: POSITIVE test -> Stat host after ports added
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host"
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: NEGATIVE test -> Map volume {{ auto_prefix }}vol to host that does not exist
- infini_map:
+ - name: NEGATIVE test -> Map volume to host that does not exist
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host_missing"
volume: "{{ auto_prefix }}vol"
state: present
@@ -407,8 +549,8 @@
failed_when: "not result.msg | regex_search('Neither host.*nor cluster.*found')"
when: not ansible_check_mode
- - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host
- infini_map:
+ - name: POSITIVE test -> Map volume to host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
state: present
@@ -417,14 +559,15 @@
system: "{{ system }}"
- name: POSITIVE test -> Rescan after mapping
- shell: |
+ ansible.builtin.shell: |
rescan-scsi-bus.sh
- become: True
+ become: true
register: rescan
failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+ changed_when: true
- - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host
- infini_map:
+ - name: POSITIVE test -> Stat map of volume to host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -432,8 +575,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host
- infini_map:
+ - name: POSITIVE test -> Remove mapping of volume from host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -442,14 +585,15 @@
system: "{{ system }}"
- name: POSITIVE test -> Rescan with remove option after removing mapping
- shell: |
+ ansible.builtin.shell: |
rescan-scsi-bus.sh --remove
- become: True
+ become: true
register: rescan
failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+ changed_when: true
- - name: POSITIVE test -> Map volume {{ auto_prefix }}vol to host {{ auto_prefix }}host using lun 0
- infini_map:
+ - name: POSITIVE test -> Map volume to host using lun 0
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
lun: 0
@@ -459,14 +603,15 @@
system: "{{ system }}"
- name: POSITIVE test -> Rescan after mapping
- shell: |
+ ansible.builtin.shell: |
rescan-scsi-bus.sh
- become: True
+ become: true
register: rescan
failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+ changed_when: true
- - name: POSITIVE test -> Stat map of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host using lun 0
- infini_map:
+ - name: POSITIVE test -> Stat map of volume to host using lun 0
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -484,42 +629,42 @@
tags:
- dev
- - name: POSITIVE test -> Create user {{ auto_prefix }}pool_admin_user with pool_admin role managing pool {{ auto_prefix }}pool
- infini_user:
+ - name: POSITIVE test -> Create user with pool_admin role managing pool
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}pool_admin_user"
user_email: "{{ auto_prefix }}pool_admin_user@example.com"
user_password: "secret1"
user_role: "pool_admin"
- user_enabled: "true"
+ user_enabled: true
user_pool: "{{ auto_prefix }}pool"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Disable user {{ auto_prefix }}pool_admin_user
- infini_user:
+ - name: POSITIVE test -> Disable user
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}pool_admin_user"
user_email: "{{ auto_prefix }}pool_admin_user@example.com"
user_password: "secret1"
user_role: "pool_admin"
- user_enabled: "false"
+ user_enabled: false
user_pool: "{{ auto_prefix }}pool"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Stat user {{ auto_prefix }}pool_admin_user
- infini_user:
+ - name: POSITIVE test -> Stat disabled user
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}pool_admin_user"
state: stat
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Reset password for user {{ auto_prefix }}pool_admin_user
- infini_user:
+ - name: POSITIVE test -> Reset password for user
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}pool_admin_user"
user_password: "secret_new"
state: reset_password
@@ -527,8 +672,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create user {{ auto_prefix }}admin_user with admin role
- infini_user:
+ - name: POSITIVE test -> Create user with admin role
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}admin_user"
user_email: "{{ auto_prefix }}admin_user@example.com"
user_password: "secret2"
@@ -539,8 +684,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create user {{ auto_prefix }}read_only_user with read_only role
- infini_user:
+ - name: POSITIVE test -> Create user with read_only role
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}read_only_user"
user_email: "{{ auto_prefix }}read_only_user@example.com"
user_password: "secret3"
@@ -551,8 +696,16 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create cluster {{ auto_prefix }}cluster with two hosts
- infini_cluster:
+ - name: POSITIVE test -> Create cluster with zero hosts
+ infinidat.infinibox.infini_cluster:
+ name: "{{ auto_prefix }}cluster_zero_hosts"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create cluster with two hosts
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host"
@@ -564,8 +717,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove {{ auto_prefix }}host from cluster {{ auto_prefix }}cluster. Leave {{ auto_prefix }}host2.
- infini_cluster:
+ - name: POSITIVE test -> Remove host from cluster. Leave second host in cluster.
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host"
@@ -577,8 +730,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Re-add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster. Remove {{ auto_prefix }}host2.
- infini_cluster:
+ - name: POSITIVE test -> Re-add host to cluster. Remove second host.
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host"
@@ -590,8 +743,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Re-add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster again. Remove {{ auto_prefix }}host2 again.
- infini_cluster:
+ - name: IDEMPOTENT test -> Re-add host to cluster again. Remove second host again.
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host"
@@ -603,8 +756,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: NEGATIVE test -> Attempt to add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster with missing host_cluster_state key:value
- infini_cluster:
+ - name: NEGATIVE test -> Attempt to add host to cluster with missing host_cluster_state key and value
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host"
@@ -616,8 +769,8 @@
register: result
failed_when: "'require host_name and host_cluster_state' not in result.msg"
- - name: NEGATIVE test -> Attempt to add {{ auto_prefix }}host to cluster {{ auto_prefix }}cluster with an unsupported key:value
- infini_cluster:
+ - name: NEGATIVE test -> Attempt to add host to cluster with an unsupported key and value
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
cluster_hosts:
- host_name: "{{ auto_prefix }}host"
@@ -628,4 +781,4 @@
password: "{{ password }}"
system: "{{ system }}"
register: result
- failed_when: "'require host_name and host_cluster_state' not in result.msg"
+ failed_when: "'require host_name and host_cluster_state' not in result.msg"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml
index bb4db264e..32adf3c18 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_snapshots.yml
@@ -1,22 +1,21 @@
---
-- hosts: localhost
+- name: Test creating snapshots using infini_vol module
+ hosts: localhost
gather_facts: true # Required for ansible_date_time
- collections:
- - infinidat.infinibox
tasks:
- - name: POSITIVE test -> Create pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Create pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
- size: 1TB
- vsize: 1TB
+ size: "{{ pool_size }}"
+ vsize: "{{ pool_size }}"
state: present
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Create thin volume under pool
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -25,8 +24,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Create snapshot {{ auto_prefix }}vol_snap from volume {{ auto_prefix }}vol
- infini_vol:
+ - name: POSITIVE test -> Create snapshot from volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap"
state: present
volume_type: snapshot
@@ -35,16 +34,16 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: DEBUG test -> Find the current date-time (WARNING - Time marks when the playbook gathered facts)
+ - name: DEBUG test -> Find the current date-time. Time marks when the playbook gathered facts.
ansible.builtin.debug:
- var=ansible_date_time.iso8601_micro
+ var: ansible_date_time.iso8601_micro
# Note: For collection filters, Ansible does not honor the
# collections list at the top of this file.
# One must use a FQCN for filters such as
# infinidat.infinibox.delta_time.
- - name: POSITIVE test -> Create and lock for 2 minutes snapshot {{ auto_prefix }}vol_snap_locked from volume {{ auto_prefix }}vol
- infini_vol:
+ - name: POSITIVE test -> Create and lock for 2 minutes snapshot from volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: present
volume_type: snapshot
@@ -56,8 +55,8 @@
vars:
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- - name: POSITIVE test -> Extend lock to 3 minutes for {{ auto_prefix }}vol_snap_locked without refresh
- infini_vol:
+ - name: POSITIVE test -> Extend lock to 3 minutes for snapshot without refresh
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: present
volume_type: snapshot
@@ -70,8 +69,8 @@
vars:
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- - name: NEGATIVE test -> Attempt to create snapshot locked for 31 days, 31 days exceeds 30 day maximum lock time enforced by infini_vol module (not API)
- infini_vol:
+ - name: NEGATIVE test -> Attempt to create snapshot locked for 31 days. Exceeds 30 day maximum lock time enforced by infini_vol module (not API)
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked_too_long"
state: present
volume_type: snapshot
@@ -85,8 +84,8 @@
vars:
fmt: "%Y-%m-%dT%H:%M:%S.%fZ"
- - name: NEGATIVE test -> Attempt to remove locked snapshot {{ auto_prefix }}vol_snap_locked
- infini_vol:
+ - name: NEGATIVE test -> Attempt to remove locked snapshot
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: absent
user: "{{ user }}"
@@ -95,13 +94,13 @@
register: result
failed_when: "'Cannot delete snapshot. Locked' not in result.msg"
- - name: POSITIVE test -> Wait for lock on {{ auto_prefix }}vol_snap_locked to expire
+ - name: POSITIVE test -> Wait for lock on snapshot to expire
ansible.builtin.pause:
seconds: 181
prompt: Waiting for {{ auto_prefix }}vol_snap_locked to expire
- - name: POSITIVE test -> Lock expired. Remove snapshot {{ auto_prefix }}vol_snap_locked.
- infini_vol:
+ - name: POSITIVE test -> Lock expired. Remove snapshot.
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap_locked"
state: absent
user: "{{ user }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_create_volumes.yml b/ansible_collections/infinidat/infinibox/playbooks/test_create_volumes.yml
new file mode 100644
index 000000000..ccd2dd400
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_create_volumes.yml
@@ -0,0 +1,93 @@
+---
+# See PSDEV-1000: Fix infini_vols handling of write_protected parameter
+# Create three volumes: vol_nonwriteable, vol_explicitly_writable and vol_writable.
+- name: Test infini_vol module
+ hosts: localhost
+ gather_facts: true # Required for ansible_date_time
+ tasks:
+
+ - name: POSITIVE test -> Create pool
+ infinidat.infinibox.infini_pool:
+ name: "{{ auto_prefix }}pool"
+ size: "{{ pool_size }}"
+ vsize: "{{ pool_size }}"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create write protected volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_nonwriteable"
+ size: 1GB
+ write_protected: true
+ pool: "{{ auto_prefix }}pool"
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create explicitly writable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_explicitly_writable"
+ size: 1GB
+ write_protected: false
+ pool: "{{ auto_prefix }}pool"
+ thin_provision: false
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Create implicitly writable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_writable"
+ size: 1GB
+ pool: "{{ auto_prefix }}pool"
+ thin_provision: false
+ state: present
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Stat nonwritable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_nonwriteable"
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: stat_nonwritable
+
+ - name: DEBUG test -> Stat non writable volume
+ ansible.builtin.debug:
+ msg: stat - {{ stat_nonwritable }}
+
+ - name: POSITIVE test -> Stat explicitly writable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_explicitly_writable"
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: stat_explicitely_writable
+
+ - name: DEBUG test -> Stat explicitly writable volume
+ ansible.builtin.debug:
+ msg: stat - {{ stat_explicitely_writable }}
+
+ - name: POSITIVE test -> Stat implicitly writable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_writable"
+ pool: "{{ auto_prefix }}pool"
+ state: stat
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: stat_writable
+
+ - name: DEBUG test -> Stat implicitly writable volume
+ ansible.builtin.debug:
+ msg: stat - {{ stat_writable }}
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_notification_rules_sample.yml b/ansible_collections/infinidat/infinibox/playbooks/test_notification_rules_sample.yml
new file mode 100644
index 000000000..0633e7bee
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_notification_rules_sample.yml
@@ -0,0 +1,111 @@
+---
+# PSDEV-1108: Playbook for testing creation of notification rules and targets.
+- name: Test infini_notification_rule module
+ hosts: localhost
+ gather_facts: false # Facts such as ansible_date_time are not needed here
+ tasks:
+
+ - name: Delete a notification rule
+ infinidat.infinibox.infini_notification_rule:
+ name: "test-rule-to-target" # this name must be unique
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
+
+ - name: Delete notification targets
+ infinidat.infinibox.infini_notification_target:
+ state: absent
+ name: testgraylog1
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: targets_out
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: targets_out
+
+ - name: Create notification targets
+ infinidat.infinibox.infini_notification_target:
+ state: present
+ name: testgraylog1
+ protocol: SYSLOG
+ host: 172.31.77.214
+ port: 8067
+ facility: LOCAL7
+ transport: TCP
+ visibility: CUSTOMER
+ post_test: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: targets_out
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: targets_out
+
+ - name: Create a new notification rule to a target
+ infinidat.infinibox.infini_notification_rule:
+ name: "test-rule-to-target" # this name must be unique
+ event_level:
+ - ERROR
+ - CRITICAL
+ include_events:
+ - ACTIVATION_PAUSED
+ exclude_events:
+ - ACTIVE_DIRECTORY_ALL_DOMAIN_CONTROLLERS_DOWN
+ - ACTIVE_DIRECTORY_LEFT
+ target: testgraylog1
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
+
+ - name: Create a new notification rule with emails
+ infinidat.infinibox.infini_notification_rule:
+ name: "test-rule-with-emails" # this name must be unique
+ event_level:
+ - ERROR
+ - CRITICAL
+ include_events:
+ - ACTIVATION_PAUSED
+ exclude_events:
+ - ACTIVE_DIRECTORY_ALL_DOMAIN_CONTROLLERS_DOWN
+ - ACTIVE_DIRECTORY_LEFT
+ recipients:
+ - wwang@infinidat.com
+ - wei.w.wang@gmail.com
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: metadata_out
+
+ # # Test any object_type
+ # - name: Delete a rule
+ # infinidat.infinibox.infini_notification_rule:
+ # name: "testw2" # this name must be unique
+ # state: "absent"
+ # user: "{{ user }}"
+ # password: "{{ password }}"
+ # system: "{{ system }}"
+ # register: metadata_out
+ #
+ # - name: Debug
+ # ansible.builtin.debug:
+ # var: metadata_out
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml
index 8aaa765fb..b2ffaa94d 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_map_cluster.yml
@@ -1,12 +1,11 @@
---
-- hosts: localhost
+- name: Clean up after test of infini_map
+ hosts: localhost
gather_facts: false
- collections:
- - infinidat.infinibox
tasks:
- - name: POSITIVE test -> Stat mapping of volume {{ auto_prefix }}vol to host {{ auto_prefix }}host1
- infini_map:
+ - name: POSITIVE test -> Stat mapping of volume to host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -23,8 +22,8 @@
ansible.builtin.debug:
msg: "Map stat: {{ host_stat }}"
- - name: POSITIVE test -> Stat mapping of volume {{ auto_prefix }}vol to cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: POSITIVE test -> Stat mapping of volume to cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -42,8 +41,8 @@
# WARNING: Assume we have a vol mapped to a cluster with hosts host1 and host2.
# Then we unmap the vol from "host1" as shown below.
- - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host1
- infini_map:
+ - name: POSITIVE test -> Remove mapping of volume from host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -52,8 +51,8 @@
system: "{{ system }}"
# This will UNMAP the vol from the entire CLUSTER!
- - name: NEGATIVE test -> Stat removed mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host1
- infini_map:
+ - name: NEGATIVE test -> Stat removed mapping of volume from host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host1"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -69,8 +68,8 @@
ansible.builtin.debug:
msg: "Map stat: {{ host_stat }}"
- - name: NEGATIVE test -> Stat removed mapping of volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: NEGATIVE test -> Stat removed mapping of volume from cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: stat
@@ -86,8 +85,8 @@
ansible.builtin.debug:
msg: "Map stat: {{ cluster_stat }}"
- - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host3
- infini_map:
+ - name: POSITIVE test -> Remove mapping of volume from host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -95,8 +94,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host3 again
- infini_map:
+ - name: IDEMPOTENT test -> Remove mapping of volume from host again
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host3"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -112,8 +111,8 @@
ansible.builtin.debug:
msg: "result: {{ result.msg }}"
- - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: POSITIVE test -> Remove mapping of volume from cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -121,8 +120,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove mapping of volume {{ auto_prefix }}vol from cluster {{ auto_prefix }}cluster
- infini_map:
+ - name: IDEMPOTENT test -> Remove mapping of volume from cluster
+ infinidat.infinibox.infini_map:
cluster: "{{ auto_prefix }}cluster"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -133,40 +132,40 @@
- '"was not mapped" not in result.msg'
- result.changed
- - name: POSITIVE test -> Remove cluster {{ auto_prefix }}cluster
- infini_cluster:
+ - name: POSITIVE test -> Remove cluster
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove host {{ auto_prefix }}host1
- infini_host:
+ - name: POSITIVE test -> Remove host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host1"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove host {{ auto_prefix }}host2
- infini_host:
+ - name: POSITIVE test -> Remove second host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host2"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove host {{ auto_prefix }}host3
- infini_host:
+ - name: POSITIVE test -> Remove third host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host3"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Remove thin volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
pool: "{{ auto_prefix }}pool"
state: absent
@@ -174,8 +173,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Remove pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
state: absent
user: "{{ user }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_metadata.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_metadata.yml
new file mode 100644
index 000000000..a17df38f7
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_metadata.yml
@@ -0,0 +1,80 @@
+---
+# PSDEV-1108: Playbook for testing cleanup of metadata resources.
+- name: Clean up test of infini_metadata
+ hosts: localhost
+ gather_facts: false # Facts such as ansible_date_time are not needed here
+
+ tasks:
+ - name: Remove system metadata key named sysfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Remove volume named foo metadata key named volfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+
+ - name: Delete volume snapshot
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}volsnap"
+ pool: "{{ auto_prefix }}pool"
+ volume_type: "snapshot"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # TODO Delete fs snap. See psdev-1121.
+
+ - name: Delete file system
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete host
+ infinidat.infinibox.infini_host:
+ name: "{{ auto_prefix }}host"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete cluster
+ infinidat.infinibox.infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete pool
+ infinidat.infinibox.infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml
index 3532008e9..b6bc00443 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_network_spaces.yml
@@ -1,18 +1,16 @@
---
-- hosts: localhost
+- name: Cleanup test of infini_network_space module
+ hosts: localhost
gather_facts: false
- collections:
- - infinidat.infinibox
vars:
- name: iSCSI
+ service_name: iSCSI
service: iSCSI
tasks:
- - name: POSITIVE test -> Remove network space named {{ name }}
- infini_network_space:
- name: "{{ name }}"
+ - name: POSITIVE test -> Remove network space
+ infinidat.infinibox.infini_network_space:
+ name: "{{ service_name }}"
state: absent
-
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml
index c7cb121df..dd596d7f0 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_resources.yml
@@ -1,28 +1,35 @@
---
-- hosts: localhost
+- name: Test remove resources
+ hosts: localhost
gather_facts: false
- collections:
- - infinidat.infinibox
tasks:
- - name: POSITIVE test -> Remove cluster {{ auto_prefix }}cluster
- infini_cluster:
+ - name: POSITIVE test -> Remove cluster with zero hosts
+ infinidat.infinibox.infini_cluster:
+ name: "{{ auto_prefix }}cluster_zero_hosts"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove second cluster
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove cluster {{ auto_prefix }}cluster again
- infini_cluster:
+ - name: IDEMPOTENT test -> Remove second cluster again
+ infinidat.infinibox.infini_cluster:
name: "{{ auto_prefix }}cluster"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host
- infini_map:
+ - name: POSITIVE test -> Remove mapping of volume from host
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -31,14 +38,15 @@
system: "{{ system }}"
- name: POSITIVE test -> Rescan with remove option after removing mapping
- shell: |
+ ansible.builtin.shell: |
rescan-scsi-bus.sh --remove
- become: True
+ become: true
register: rescan
failed_when: "rescan.rc != 0 and 'not found' not in rescan.stderr"
+ changed_when: true
- - name: IDEMPOTENT test -> Remove mapping of volume {{ auto_prefix }}vol from host {{ auto_prefix }}host again
- infini_map:
+ - name: IDEMPOTENT test -> Remove mapping of volume from host again
+ infinidat.infinibox.infini_map:
host: "{{ auto_prefix }}host"
volume: "{{ auto_prefix }}vol"
state: absent
@@ -46,32 +54,32 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove host {{ auto_prefix }}host
- infini_host:
+ - name: POSITIVE test -> Remove host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove host {{ auto_prefix }}host2
- infini_host:
+ - name: POSITIVE test -> Remove second host
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host2"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove host {{ auto_prefix }}host again
- infini_host:
+ - name: IDEMPOTENT test -> Remove host again
+ infinidat.infinibox.infini_host:
name: "{{ auto_prefix }}host"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove export client for export /{{ auto_prefix }}export
- infini_export_client:
+ - name: POSITIVE test -> Remove export client for export
+ infinidat.infinibox.infini_export_client:
client: 20.20.20.20
state: absent
access_mode: "RO"
@@ -80,8 +88,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove export client for export /{{ auto_prefix }}export again
- infini_export_client:
+ - name: IDEMPOTENT test -> Remove export client for export again
+ infinidat.infinibox.infini_export_client:
client: 20.20.20.20
state: absent
access_mode: "RO"
@@ -90,8 +98,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove export {{ auto_prefix }}export of file system {{ auto_prefix }}fs
- infini_export:
+ - name: POSITIVE test -> Remove export of file system
+ infinidat.infinibox.infini_export:
name: "/{{ auto_prefix }}export"
filesystem: "{{ auto_prefix }}fs"
state: absent
@@ -99,8 +107,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove export {{ auto_prefix }}export of file system {{ auto_prefix }}fs again
- infini_export:
+ - name: IDEMPOTENT test -> Remove export of file system again
+ infinidat.infinibox.infini_export:
name: "/{{ auto_prefix }}export"
filesystem: "{{ auto_prefix }}fs"
state: absent
@@ -108,8 +116,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove file system named {{ auto_prefix }}fs_default under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Remove file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs_default"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -118,8 +126,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove file system named {{ auto_prefix }}fs_thick under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Remove thick file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs_thick"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -128,8 +136,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool
- infini_fs:
+ - name: POSITIVE test -> Remove thin file system
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -138,8 +146,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove file system named {{ auto_prefix }}fs under pool {{ auto_prefix }}pool again
- infini_fs:
+ - name: IDEMPOTENT test -> Remove file system again
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -148,8 +156,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Remove thin volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
pool: "{{ auto_prefix }}pool"
state: absent
@@ -157,8 +165,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove thick volume {{ auto_prefix }}vol_thick under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Remove thick volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_thick"
pool: "{{ auto_prefix }}pool"
state: absent
@@ -166,8 +174,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool again
- infini_vol:
+ - name: IDEMPOTENT test -> Remove volume again
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
pool: "{{ auto_prefix }}pool"
state: absent
@@ -175,24 +183,24 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Remove pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove snapshot {{ auto_prefix }}vol_snap
- infini_vol:
+ - name: POSITIVE test -> Remove snapshot
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol_snap"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove file system named {{ auto_prefix }}fs again from now missing pool {{ auto_prefix }}pool
- infini_fs:
+ - name: IDEMPOTENT test -> Remove file system again from now missing pool
+ infinidat.infinibox.infini_fs:
name: "{{ auto_prefix }}fs"
size: 1GB
pool: "{{ auto_prefix }}pool"
@@ -201,32 +209,32 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove user {{ auto_prefix }}read_only_user
- infini_user:
+ - name: POSITIVE test -> Remove read only user
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}read_only_user"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove user {{ auto_prefix }}admin_user
- infini_user:
+ - name: POSITIVE test -> Remove admin user
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}admin_user"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove user {{ auto_prefix }}pool_admin_user
- infini_user:
+ - name: POSITIVE test -> Remove pool admin user
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}pool_admin_user"
state: absent
user: "{{ user }}"
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove user {{ auto_prefix }}pool_admin_user again
- infini_user:
+ - name: IDEMPOTENT test -> Remove pool admin user again
+ infinidat.infinibox.infini_user:
user_name: "{{ auto_prefix }}pool_admin_user"
state: absent
user: "{{ user }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml
index 0320d8bd4..e119e9e8a 100644
--- a/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_snapshots.yml
@@ -1,12 +1,11 @@
---
-- hosts: localhost
+- name: Test remove resources
+ hosts: localhost
gather_facts: false
- collections:
- - infinidat.infinibox
tasks:
- - name: POSITIVE test -> Remove thin volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool
- infini_vol:
+ - name: POSITIVE test -> Remove thin volume
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
pool: "{{ auto_prefix }}pool"
state: absent
@@ -14,8 +13,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: IDEMPOTENT test -> Remove volume {{ auto_prefix }}vol under pool {{ auto_prefix }}pool again
- infini_vol:
+ - name: IDEMPOTENT test -> Remove volume again
+ infinidat.infinibox.infini_vol:
name: "{{ auto_prefix }}vol"
pool: "{{ auto_prefix }}pool"
state: absent
@@ -23,8 +22,8 @@
password: "{{ password }}"
system: "{{ system }}"
- - name: POSITIVE test -> Remove pool {{ auto_prefix }}pool
- infini_pool:
+ - name: POSITIVE test -> Remove pool
+ infinidat.infinibox.infini_pool:
name: "{{ auto_prefix }}pool"
state: absent
user: "{{ user }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_users_repository.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_users_repository.yml
new file mode 100644
index 000000000..11106b4b3
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_users_repository.yml
@@ -0,0 +1,80 @@
+---
+# PSDEV-1108: Playbook for testing cleanup of metadata resources.
+- name: Test remove users repository
+ hosts: localhost
+ gather_facts: false # Facts not needed; note ansible_date_time would require gather_facts: true
+
+ tasks:
+ - name: Remove system metadata key named sysfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "system"
+ key: "sysfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Remove volume named foo metadata key named volfoo
+ infinidat.infinibox.infini_metadata:
+ object_type: "vol"
+ object_name: "{{ auto_prefix }}vol"
+ key: "volfoo"
+ state: "absent"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+ register: metadata_out
+
+ - name: Delete volume snapshot
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}volsnap"
+ pool: "{{ auto_prefix }}pool"
+ volume_type: "snapshot"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ # TODO Delete fs snap. See psdev-1121.
+
+ - name: Delete file system
+ infinidat.infinibox.infini_fs:
+ name: "{{ auto_prefix }}fs"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete host
+ infinidat.infinibox.infini_host:
+ name: "{{ auto_prefix }}host"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete cluster
+ infinidat.infinibox.infini_cluster:
+ name: "{{ auto_prefix }}cluster"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: Delete pool
+ infinidat.infinibox.infini_pool:
+ name: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/playbooks/test_remove_volumes.yml b/ansible_collections/infinidat/infinibox/playbooks/test_remove_volumes.yml
new file mode 100644
index 000000000..728e45eb9
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/playbooks/test_remove_volumes.yml
@@ -0,0 +1,34 @@
+---
+# See PSDEV-1000: Fix infini_vols handling of write_protected parameter
+# Remove three volumes: vol_nonwriteable, vol_explicitly_writable and vol_writable.
+- name: Test remove volumes
+ hosts: localhost
+ gather_facts: false
+ tasks:
+
+ - name: POSITIVE test -> Remove write protected volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_nonwriteable"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove explicitly writable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_explicitly_writable"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+
+ - name: POSITIVE test -> Remove implicitly writable volume
+ infinidat.infinibox.infini_vol:
+ name: "{{ auto_prefix }}vol_writable"
+ pool: "{{ auto_prefix }}pool"
+ state: absent
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
diff --git a/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py b/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
index f88a55ea4..cf3fc13b0 100644
--- a/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
+++ b/ansible_collections/infinidat/infinibox/plugins/doc_fragments/infinibox.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
diff --git a/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py b/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py
index 5e140bdd8..68000c02b 100644
--- a/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py
+++ b/ansible_collections/infinidat/infinibox/plugins/filter/psus_filters.py
@@ -5,7 +5,7 @@
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
@@ -28,7 +28,6 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from ansible.errors import AnsibleError
import datetime
diff --git a/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py b/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
index 31df73d04..24f3aa9fb 100644
--- a/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
+++ b/ansible_collections/infinidat/infinibox/plugins/module_utils/infinibox.py
@@ -1,16 +1,21 @@
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# pylint: disable=use-list-literal,use-dict-literal,line-too-long,wrong-import-position,broad-exception-caught,invalid-name
+
+""" Infinidat utilities """
+
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
-from ansible.module_utils.six import raise_from
-try:
- import ansible.module_utils.errors
-except (ImportError, ModuleNotFoundError):
- import errors # Used during "make dev-hack-module-[present, stat, absent]"
+# try:
+# import ansible.module_utils.errors
+# except (ImportError, ModuleNotFoundError):
+# import errors # Used during "make dev-hack-module-[present, stat, absent]"
try:
from infinisdk import InfiniBox, core
@@ -22,13 +27,32 @@ else:
HAS_INFINISDK = True
INFINISDK_IMPORT_ERROR = None
+HAS_ARROW = True
+try:
+ import arrow
+except ImportError:
+ HAS_ARROW = False
+except Exception:
+ HAS_INFINISDK = False
+
from functools import wraps
from os import environ
from os import path
from datetime import datetime
+HAS_URLLIB3 = True
+try:
+ import urllib3
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+except ImportError:
+ HAS_URLLIB3 = False
+
-def unixMillisecondsToDate(unix_ms):
+INFINIBOX_SYSTEM = None
+
+
+def unixMillisecondsToDate(unix_ms): # pylint: disable=invalid-name
+ """ Convert unix time with ms to a datetime UTC time """
return (datetime.utcfromtimestamp(unix_ms / 1000.), 'UTC')
@@ -39,12 +63,13 @@ def api_wrapper(func):
module = args[0]
try:
return func(*args, **kwargs)
- except core.exceptions.APICommandException as e:
- module.fail_json(msg=e.message)
- except core.exceptions.SystemNotFoundException as e:
- module.fail_json(msg=e.message)
- except Exception:
- raise
+ except core.exceptions.SystemNotFoundException as err:
+ module.fail_json(msg=str(err))
+ except core.exceptions.APICommandException as err:
+ module.fail_json(msg=str(err))
+ except Exception as err:
+ module.fail_json(msg=str(err))
+ return None # Should never get to this line but it quiets pylint inconsistent-return-statements
return __wrapper
@@ -74,28 +99,37 @@ def merge_two_dicts(dict1, dict2):
@api_wrapper
def get_system(module):
- """Return System Object or Fail"""
- box = module.params['system']
- user = module.params.get('user', None)
- password = module.params.get('password', None)
-
- if user and password:
- system = InfiniBox(box, auth=(user, password), use_ssl=True)
- elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
- system = InfiniBox(box,
- auth=(environ.get('INFINIBOX_USER'),
- environ.get('INFINIBOX_PASSWORD')),
- use_ssl=True)
- elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
- system = InfiniBox(box, use_ssl=True)
- else:
- module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
+ """
+    Return the cached System Object, creating it and logging in if it does not yet exist, or fail.
+ Use a global system Infinibox object so that there will only be one
+ system session used for this module instance.
+ Enables execute_state() to log out of the only session properly.
+ """
+ global INFINIBOX_SYSTEM # pylint: disable=global-statement
+
+ if not INFINIBOX_SYSTEM:
+ # Create system and login
+ box = module.params['system']
+ user = module.params.get('user', None)
+ password = module.params.get('password', None)
+ if user and password:
+ INFINIBOX_SYSTEM = InfiniBox(box, auth=(user, password), use_ssl=True)
+ elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
+ INFINIBOX_SYSTEM = InfiniBox(box,
+ auth=(environ.get('INFINIBOX_USER'),
+ environ.get('INFINIBOX_PASSWORD')),
+ use_ssl=True)
+ elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
+ INFINIBOX_SYSTEM = InfiniBox(box, use_ssl=True)
+ else:
+ module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
- try:
- system.login()
- except Exception:
- module.fail_json(msg="Infinibox authentication failed. Check your credentials")
- return system
+ try:
+ INFINIBOX_SYSTEM.login()
+ except Exception:
+ module.fail_json(msg="Infinibox authentication failed. Check your credentials")
+
+ return INFINIBOX_SYSTEM
@api_wrapper
@@ -108,7 +142,10 @@ def get_pool(module, system):
try:
name = module.params['pool']
except KeyError:
- name = module.params['name']
+ try:
+ name = module.params['name']
+ except KeyError:
+ name = module.params['object_name'] # For metadata
return system.pools.get(name=name)
except Exception:
return None
@@ -121,7 +158,10 @@ def get_filesystem(module, system):
try:
filesystem = system.filesystems.get(name=module.params['filesystem'])
except KeyError:
- filesystem = system.filesystems.get(name=module.params['name'])
+ try:
+ filesystem = system.filesystems.get(name=module.params['name'])
+ except KeyError:
+ filesystem = system.filesystems.get(name=module.params['object_name'])
return filesystem
except Exception:
return None
@@ -137,7 +177,7 @@ def get_export(module, system):
export_name = module.params['name']
export = system.exports.get(export_path=export_name)
- except ObjectNotFound as err:
+ except ObjectNotFound:
return None
return export
@@ -150,7 +190,10 @@ def get_volume(module, system):
try:
volume = system.volumes.get(name=module.params['name'])
except KeyError:
- volume = system.volumes.get(name=module.params['volume'])
+ try:
+ volume = system.volumes.get(name=module.params['volume'])
+ except KeyError:
+ volume = system.volumes.get(name=module.params['object_name']) # Used by metadata module
return volume
except Exception:
return None
@@ -167,16 +210,23 @@ def get_net_space(module, system):
@api_wrapper
-def get_vol_sn(module, system):
- """Return Volume or None"""
+def get_vol_by_sn(module, system):
+ """Return volume that matches the serial or None"""
try:
- try:
- volume = system.volumes.get(serial=module.params['serial'])
- except KeyError:
- return None
- return volume
+ volume = system.volumes.get(serial=module.params['serial'])
except Exception:
return None
+ return volume
+
+
+@api_wrapper
+def get_fs_by_sn(module, system):
+ """Return filesystem that matches the serial or None"""
+ try:
+ filesystem = system.filesystems.get(serial=module.params['serial'])
+ except Exception:
+ return None
+ return filesystem
@api_wrapper
@@ -189,7 +239,10 @@ def get_host(module, system):
try:
host_param = module.params['name']
except KeyError:
- host_param = module.params['host']
+ try:
+ host_param = module.params['host']
+ except KeyError:
+ host_param = module.params['object_name'] # For metadata
if a_host_name == host_param:
host = a_host
@@ -208,7 +261,10 @@ def get_cluster(module, system):
try:
cluster_param = module.params['name']
except KeyError:
- cluster_param = module.params['cluster']
+ try:
+ cluster_param = module.params['cluster']
+ except KeyError:
+ cluster_param = module.params['object_name'] # For metadata
if a_cluster_name == cluster_param:
cluster = a_cluster
@@ -217,12 +273,86 @@ def get_cluster(module, system):
@api_wrapper
-def get_user(module, system):
+def get_user(module, system, user_name_to_find=None):
"""Find a user by the user_name specified in the module"""
user = None
- user_name = module.params['user_name']
+ if not user_name_to_find:
+ user_name = module.params['user_name']
+ else:
+ user_name = user_name_to_find
try:
user = system.users.get(name=user_name)
except ObjectNotFound:
pass
return user
+
+
+def check_snapshot_lock_options(module):
+ """
+ Check if specified options are feasible for a snapshot.
+
+ Prevent very long lock times.
+ max_delta_minutes limits locks to 30 days (43200 minutes).
+
+ This functionality is broken out from manage_snapshot_locks() to allow
+ it to be called by create_snapshot() before the snapshot is actually
+ created.
+ """
+ snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+
+ # Check for lock in the past
+ now = arrow.utcnow()
+ if lock_expires_at <= now:
+ msg = "Cannot lock snapshot with a snapshot_lock_expires_at "
+ msg += f"of '{snapshot_lock_expires_at}' from the past"
+ module.fail_json(msg=msg)
+
+ # Check for lock later than max lock, i.e. too far in future.
+ max_delta_minutes = 43200 # 30 days in minutes
+ max_lock_expires_at = now.shift(minutes=max_delta_minutes)
+ if lock_expires_at >= max_lock_expires_at:
+ msg = f"snapshot_lock_expires_at exceeds {max_delta_minutes // 24 // 60} days in the future"
+ module.fail_json(msg=msg)
+
+
+def manage_snapshot_locks(module, snapshot):
+ """
+ Manage the locking of a snapshot. Check for bad lock times.
+ See check_snapshot_lock_options() which has additional checks.
+ """
+ snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ current_lock_expires_at = snapshot.get_lock_expires_at()
+ changed = False
+
+ check_snapshot_lock_options(module)
+
+ if snapshot_lock_expires_at: # Then user has specified wish to lock snap
+ lock_expires_at = arrow.get(snapshot_lock_expires_at)
+ if snap_is_locked and lock_expires_at < current_lock_expires_at:
+ # Lock earlier than current lock
+            msg = f"snapshot_lock_expires_at '{lock_expires_at}' precedes the current lock time of '{current_lock_expires_at}'"
+ module.fail_json(msg=msg)
+ elif snap_is_locked and lock_expires_at == current_lock_expires_at:
+ # Lock already set to correct time
+ pass
+ else:
+ # Set lock
+ if not module.check_mode:
+ snapshot.update_lock_expires_at(lock_expires_at)
+ changed = True
+ return changed
+
+
+def catch_failed_module_utils_imports(module):
+ msg = ""
+ if not HAS_ARROW:
+ msg += "Failed to import arrow module. "
+ if not HAS_INFINISDK:
+ msg += "Failed to import infinisdk module. "
+ if not HAS_URLLIB3:
+ msg += "Failed to import urllib3 module. "
+ module.fail_json(msg=msg)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py
new file mode 100644
index 000000000..bb32b48b1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_certificate.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""This module creates or modifies SSL certificates on Infinibox."""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_certificate
+version_added: 2.16.0
+short_description: Create (present state) or clear (absent state) SSL certificates on Infinibox
+description:
+ - This module uploads (present state) or clears (absent state) SSL certificates on Infinibox
+author: David Ohlemacher (@ohlemacher)
+options:
+ certificate_file_name:
+ description:
+ - Name with full path of a certificate file.
+ type: str
+ required: false
+ state:
+ description:
+      - Creates/Modifies the system's SSL certificate by uploading one from a file, when using state present.
+ - For state absent, the current certificate is removed and a new self-signed certificate is automatically generated by the IBOX.
+ - State stat shows the existing certificate's details.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Upload SSL certificate from file
+ infini_certificate:
+ certificate_file_name: cert.crt
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: State SSL certificate
+ infini_certificate:
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Clear SSL certificate
+ infini_certificate:
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ merge_two_dicts,
+ get_system,
+ infinibox_argument_spec,
+)
+
+HAS_URLLIB3 = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ HAS_URLLIB3 = False
+
+
+def handle_stat(module):
+ """ Handle the stat state parameter """
+ certificate_file_name = module.params['certificate_file_name']
+ path = "system/certificates"
+ system = get_system(module)
+ try:
+ cert_result = system.api.get(path=path).get_result()[0]
+ except APICommandFailed:
+ msg = f"Cannot stat SSL certificate {certificate_file_name}"
+ module.fail_json(msg=msg)
+ result = dict(
+ changed=False,
+        msg=f"SSL certificate stat {certificate_file_name} found"
+ )
+ result = merge_two_dicts(result, cert_result)
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """ Handle the present state parameter """
+ certificate_file_name = module.params['certificate_file_name']
+ path = "system/certificates"
+ system = get_system(module)
+
+ with open(certificate_file_name, 'rb') as cert_file:
+ try:
+ try:
+ files = {'file': cert_file}
+ except FileNotFoundError:
+ module.fail_json(msg=f"Cannot find SSL certificate file named {certificate_file_name}")
+ except Exception as err: # pylint: disable=broad-exception-caught
+ module.fail_json(msg=f"Cannot open SSL certificate file named {certificate_file_name}: {err}")
+ cert_result = system.api.post(path=path, files=files).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot upload cert: {err}"
+ module.fail_json(msg=msg)
+
+ cert_serial = cert_result['certificate']['serial_number']
+ cert_issued_by_cn = cert_result['certificate']['issued_by']['CN']
+ cert_issued_to_cn = cert_result['certificate']['issued_to']['CN']
+ result = dict(
+ changed=True,
+ msg="System SSL certificate uploaded successfully. " +
+ f"Certificate S/N {cert_serial} issued by CN {cert_issued_by_cn} to CN {cert_issued_to_cn}"
+ )
+ result = merge_two_dicts(result, cert_result)
+ module.exit_json(**result)
+
+
+def handle_absent(module):
+ """ Handle the absent state parameter. Clear existing cert. IBOX will install self signed cert. """
+ path = "system/certificates/generate_self_signed?approved=true"
+ system = get_system(module)
+ try:
+ cert_result = system.api.post(path=path).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot clear SSL certificate: {err}"
+ module.fail_json(msg=msg)
+ result = dict(
+ changed=True,
+ msg="System SSL certificate cleared and a self signed certificate was installed successfully"
+ )
+ result = merge_two_dicts(result, cert_result)
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ certificate_file_name = module.params["certificate_file_name"]
+ state = module.params["state"]
+
+ if state in ["stat", "absent"]:
+ pass
+ if state in ["present"]:
+ if not certificate_file_name:
+ msg = "Certificate file name parameter must be provided"
+ module.fail_json(msg=msg)
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ certificate_file_name=dict(required=False, default=None),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_URLLIB3:
+ module.fail_json(msg=missing_required_lib("urllib3"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
index fe682cf3c..c1972efe2 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py
@@ -1,10 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+""" A module for managing Infinibox clusters """
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
@@ -51,12 +56,9 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
try:
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
- INFINISDK_IMPORT_ERROR,
api_wrapper,
infinibox_argument_spec,
get_system,
@@ -67,7 +69,6 @@ try:
except ModuleNotFoundError:
from infinibox import ( # Used when hacking
HAS_INFINISDK,
- INFINISDK_IMPORT_ERROR,
api_wrapper,
infinibox_argument_spec,
get_system,
@@ -76,12 +77,6 @@ except ModuleNotFoundError:
merge_two_dicts,
)
-try:
- from infi.dtypes.iqn import make_iscsi_name
- HAS_INFI_MOD = True
-except ImportError:
- HAS_INFI_MOD = False
-
@api_wrapper
def get_host_by_name(system, host_name):
@@ -98,67 +93,58 @@ def get_host_by_name(system, host_name):
@api_wrapper
def create_cluster(module, system):
- # print("create cluster")
- changed = True
+ """ Create a cluster """
+ changed = False
if not module.check_mode:
cluster = system.host_clusters.create(name=module.params['name'])
cluster_hosts = module.params['cluster_hosts']
- for cluster_host in cluster_hosts:
- if cluster_host['host_cluster_state'] == 'present':
- host = get_host_by_name(system, cluster_host['host_name'])
- cluster.add_host(host)
- # print("Added host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
- # else:
- # print("Skipped adding (absent) host {0} to cluster {1}".format(host.get_name, cluster.get_name()))
+ if cluster_hosts:
+ for cluster_host in cluster_hosts:
+ if cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, cluster_host['host_name'])
+ cluster.add_host(host)
+ changed = True
return changed
@api_wrapper
def update_cluster(module, system, cluster):
- # print("update cluster")
+ """ Update a cluster """
changed = False
# e.g. of one host dict found in the module.params['cluster_hosts'] list:
# {host_name: <'some_name'>, host_cluster_state: <'present' or 'absent'>}
module_cluster_hosts = module.params['cluster_hosts']
current_cluster_hosts_names = [host.get_name() for host in cluster.get_field('hosts')]
- # print("current_cluster_hosts_names:", current_cluster_hosts_names)
- for module_cluster_host in module_cluster_hosts:
- module_cluster_host_name = module_cluster_host['host_name']
- # print("module_cluster_host_name:", module_cluster_host_name)
- # Need to add host to cluster?
- if module_cluster_host_name not in current_cluster_hosts_names:
- if module_cluster_host['host_cluster_state'] == 'present':
- host = get_host_by_name(system, module_cluster_host_name)
- if not host:
- msg = 'Cannot find host {0} to add to cluster {1}'.format(
- module_cluster_host_name,
- cluster.get_name(),
- )
- module.fail_json(msg=msg)
- cluster.add_host(host)
- # print("Added host {0} to cluster {1}".format(host.get_name(), cluster.get_name()))
- changed = True
- # Need to remove host from cluster?
- elif module_cluster_host_name in current_cluster_hosts_names:
- if module_cluster_host['host_cluster_state'] == 'absent':
- host = get_host_by_name(system, module_cluster_host_name)
- if not host:
- msg = 'Cannot find host {0} to add to cluster {1}'.format(
- module_cluster_host_name,
- cluster.get_name(),
- )
- module.fail_json(msg=msg)
- cluster.remove_host(host)
- # print("Removed host {0} from cluster {1}".format(host.get_name(), cluster.get_name()))
- changed = True
+ if module_cluster_hosts:
+ for module_cluster_host in module_cluster_hosts:
+ module_cluster_host_name = module_cluster_host['host_name']
+ # Need to add host to cluster?
+ if module_cluster_host_name not in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'present':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = f'Cannot find host {module_cluster_host_name} to add to cluster {cluster.get_name()}'
+ module.fail_json(msg=msg)
+ cluster.add_host(host)
+ changed = True
+ # Need to remove host from cluster?
+ elif module_cluster_host_name in current_cluster_hosts_names:
+ if module_cluster_host['host_cluster_state'] == 'absent':
+ host = get_host_by_name(system, module_cluster_host_name)
+ if not host:
+ msg = f'Cannot find host {module_cluster_host_name} to add to cluster {cluster.get_name()}'
+ module.fail_json(msg=msg)
+ cluster.remove_host(host)
+ changed = True
return changed
@api_wrapper
def delete_cluster(module, cluster):
+ """ Delete a cluster """
if not cluster:
- msg = "Cluster {0} not found".format(cluster.get_name())
+ msg = f"Cluster {cluster.get_name()} not found"
module.fail_json(msg=msg)
changed = True
if not module.check_mode:
@@ -166,13 +152,8 @@ def delete_cluster(module, cluster):
return changed
-def get_sys_cluster(module):
- system = get_system(module)
- cluster = get_cluster(module, system)
- return (system, cluster)
-
-
def get_cluster_fields(cluster):
+ """ Find fields for cluster """
fields = cluster.get_fields(from_cache=True, raw_value=True)
created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
field_dict = dict(
@@ -192,10 +173,12 @@ def get_cluster_fields(cluster):
def handle_stat(module):
- system, cluster = get_sys_cluster(module)
+ """ Handle stat state """
+ system = get_system(module)
+ cluster = get_cluster(module, system)
cluster_name = module.params["name"]
if not cluster:
- module.fail_json(msg='Cluster {0} not found'.format(cluster_name))
+ module.fail_json(msg=f'Cluster {cluster_name} not found')
field_dict = get_cluster_fields(cluster)
result = dict(
changed=False,
@@ -206,34 +189,39 @@ def handle_stat(module):
def handle_present(module):
- system, cluster = get_sys_cluster(module)
+ """ Handle present state """
+ system = get_system(module)
+ cluster = get_cluster(module, system)
cluster_name = module.params["name"]
if not cluster:
changed = create_cluster(module, system)
- msg = 'Cluster {0} created'.format(cluster_name)
+ msg = f'Cluster {cluster_name} created'
module.exit_json(changed=changed, msg=msg)
else:
changed = update_cluster(module, system, cluster)
if changed:
- msg = 'Cluster {0} updated'.format(cluster_name)
+ msg = f'Cluster {cluster_name} updated'
else:
- msg = 'Cluster {0} required no changes'.format(cluster_name)
+ msg = f'Cluster {cluster_name} required no changes'
module.exit_json(changed=changed, msg=msg)
def handle_absent(module):
- system, cluster = get_sys_cluster(module)
+ """ Handle absent state """
+ system = get_system(module)
+ cluster = get_cluster(module, system)
cluster_name = module.params["name"]
if not cluster:
changed = False
- msg = "Cluster {0} already absent".format(cluster_name)
+ msg = f"Cluster {cluster_name} already absent"
else:
changed = delete_cluster(module, cluster)
- msg = "Cluster {0} removed".format(cluster_name)
+ msg = f"Cluster {cluster_name} removed"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Handle states """
state = module.params['state']
try:
if state == 'stat':
@@ -243,36 +231,38 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def check_options(module):
+ """ Check module parameters for logic errors """
state = module.params['state']
if state == 'present':
- if module.params['cluster_hosts'] is None:
- module.fail_json(msg='Option cluster_hosts, a list, must be provided')
-
cluster_hosts = module.params['cluster_hosts']
- for host in cluster_hosts:
- try:
- # Check host has required keys
- valid_keys = ['host_name', 'host_cluster_state']
- for valid_key in valid_keys:
- not_used = host[valid_key]
- # Check host has no unknown keys
- if len(host.keys()) != len(valid_keys):
- raise KeyError
- except KeyError:
- msg = 'With state present, all cluster_hosts ' \
- + 'require host_name and host_cluster_state key:values ' \
- + 'and no others'
- module.fail_json(msg=msg)
+ if cluster_hosts:
+ for host in cluster_hosts:
+ try:
+ # Check host has required keys
+ valid_keys = ['host_name', 'host_cluster_state']
+ for valid_key in valid_keys:
+ # _ = host[valid_key]
+ if valid_key not in host.keys():
+ raise KeyError
+ # Check host has no unknown keys
+ if len(host.keys()) != len(valid_keys):
+ raise KeyError
+ except KeyError:
+ msg = 'With state present, all cluster_hosts ' \
+ + 'require host_name and host_cluster_state key:values ' \
+ + 'and no others'
+ module.fail_json(msg=msg)
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
@@ -284,9 +274,6 @@ def main():
module = AnsibleModule(argument_spec, supports_check_mode=True)
- if not HAS_INFI_MOD:
- module.fail_json(msg=missing_required_lib('infi.dtypes.iqn'))
-
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib('infinisdk'))
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py
new file mode 100644
index 000000000..881480008
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_config.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module Modifies config on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_config
+version_added: 2.13.0
+short_description: Modify config on Infinibox
+description:
+ - This module modifies system config on Infinibox.
+author: Wei Wang (@wwang)
+options:
+ config_group:
+ description:
+ - Config group
+ type: str
+ required: true
+ choices: [ "core", "ip_config", "iscsi", "limits", "mgmt", "ndoe_interfaces", "overriders", "security", "ssh" ]
+ key:
+ description:
+ - Name of the config
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the config key
+ type: str
+ required: false
+ state:
+ description:
+      - Queries (stat) or modifies (present) config values.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Set compression setting to true
+ infini_config:
+ config_group: "mgmt"
+ key: "pool.compression_enabled_default"
+ value: false
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+"""
+
+# RETURN = r''' # '''
+
+# -*- coding: utf-8 -*-
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def get_config(module, disable_fail=False):
+ """
+ Find and return config setting value
+ Use disable_fail when we are looking for config
+ and it may or may not exist and neither case is an error.
+ """
+ system = get_system(module)
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ result = None
+
+ path = f"config/{config_group}/{key}"
+ try:
+ api_response = system.api.get(path=path)
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot {config_group} key {key}: {err}")
+
+ if api_response:
+ result = api_response.get_result()
+ good_status = api_response.response.status_code == 200
+ if not disable_fail and not good_status:
+ msg = f"Configuration for {config_group} with key {key} failed"
+ module.fail_json(msg=msg)
+ elif disable_fail and not good_status:
+ return None
+ return result
+
+
+def handle_stat(module):
+    """Return config stat"""
+
+    config_group = module.params["config_group"]
+    key = module.params["key"]
+    # get_config() fails the module itself if the setting cannot be retrieved
+    value = get_config(module)
+
+    result = {
+        "changed": False,
+        "object_type": config_group,
+        "key": key,
+        "value": value,
+    }
+    module.exit_json(**result)
+
+
+@api_wrapper
+def set_config(module):
+ """
+ Find and return config setting value
+ Use disable_fail when we are looking for config
+ and it may or may not exist and neither case is an error.
+ """
+ system = get_system(module)
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ value = module.params["value"]
+
+ path = f"config/{config_group}/{key}"
+
+ if value.lower() == "true":
+ data = True
+ elif value.lower() == "false":
+ data = False
+
+ try:
+ system.api.put(path=path, data=data)
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot set config group {config_group} key {key} to value {value}: {err}")
+ # Variable 'changed' not returned by design
+
+
+def handle_present(module):
+    """Make config present"""
+    changed = False
+    msg = "Config unchanged"
+    if not module.check_mode:
+        # Read the old value (which may not exist, hence disable_fail),
+        # write the new one, then re-read to detect an actual change.
+        old_config = get_config(module, disable_fail=True)
+        set_config(module)
+        new_config = get_config(module)
+        changed = new_config != old_config
+        if changed:
+            msg = "Config changed"
+        else:
+            msg = "Config unchanged since the value is the same as the existing config"
+    module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+    """Determine which state function to execute and do so"""
+    state = module.params["state"]
+    try:
+        if state == "stat":
+            handle_stat(module)
+        elif state == "present":
+            handle_present(module)
+        else:
+            module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+    finally:
+        # Always log out; exit_json/fail_json raise SystemExit, so this
+        # finally block is the only place guaranteed to run afterwards.
+        system = get_system(module)
+        system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ state = module.params["state"]
+ config_group = module.params["config_group"]
+ key = module.params["key"]
+ value = module.params["value"]
+ vtype = type(value)
+
+ groups = [
+ "core",
+ "ip_config",
+ "iscsi",
+ "limits",
+ "mgmt",
+ "ndoe_interfaces",
+ "overriders",
+ "security",
+ "ssh",
+ ]
+
+ if state == "present" and key == "pool.compression_enabled_default":
+ if not isinstance(value, str): # isvalue.lower() not in values:
+ module.fail_json(
+ f"Value must be of type {type(str())}. Invalid value: {value} of {vtype}."
+ )
+ if config_group not in groups:
+ module.fail_json(
+ f"Config_group must be one of {groups}"
+ )
+
+
+def main():
+    """Main module function"""
+    argument_spec = infinibox_argument_spec()
+
+    argument_spec.update(
+        {
+            # Choices mirror the DOCUMENTATION block and check_options()
+            "config_group": {"required": True, "choices": ["core", "ip_config", "iscsi", "limits", "mgmt", "ndoe_interfaces", "overriders", "security", "ssh"]},
+            # no_log=False: 'key' is a config name, not a secret
+            "key": {"required": True, "no_log": False},
+            "value": {"required": False, "default": None},
+            "state": {"required": False, "default": "present", "choices": ["stat", "present"]},
+        }
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    if not HAS_INFINISDK:
+        module.fail_json(msg=missing_required_lib("infinisdk"))
+
+    check_options(module)
+    execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py
new file mode 100644
index 000000000..bc6cbdade
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_event.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module sends events to Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_event
+version_added: 2.16.0
+short_description: Create custom events on Infinibox
+description:
+ - This module creates events on Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ description_template:
+ description:
+ - The content of the custom event
+ type: str
+ required: true
+ visibility:
+ description:
+ - The event's visibility
+ type: str
+ required: false
+ choices:
+ - CUSTOMER
+ - INFINIDAT
+ default: CUSTOMER
+ level:
+ description:
+ - The level of the custom event
+ type: str
+ required: true
+ choices:
+ - INFO
+ - WARNING
+ - ERROR
+ - CRITICAL
+ state:
+ description:
+      - Creates a custom event when present. Stat is not yet implemented. There is no way to remove events once posted, so absent is also not implemented.
+ type: str
+ required: false
+ default: present
+ choices: [ "present" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create custom info event
+ infini_event:
+ description_template: Message content
+ level: INFO
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ infinibox_argument_spec,
+ get_system,
+)
+
+
+def handle_stat(module):
+    """Handle stat state"""
+    # Querying events is not supported by this module yet
+    msg = "handle_stat() is not implemented"
+    module.exit_json(msg=msg)
+
+
+def handle_present(module):
+ """Handle present state"""
+ system = get_system(module)
+ description_template = module.params["description_template"]
+ level = module.params["level"]
+ visibility = module.params["visibility"]
+
+ path = "events/custom"
+ json_data = {
+ "description_template": description_template,
+ "level": level,
+ "visibility": visibility,
+ }
+ system.api.post(path=path, data=json_data)
+ module.exit_json(changed=True, msg="Event posted")
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ else:
+ module.exit_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ description_template=dict(required=True),
+ level=dict(required=True, choices=["INFO", "WARNING", "ERROR", "CRITICAL"]),
+ state=dict(required=False, default="present", choices=["present"]),
+ visibility=dict(default="CUSTOMER", required=False, choices=["CUSTOMER", "INFINIDAT"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.exit_json(msg=missing_required_lib("infinisdk"))
+
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
index f83e9b1f1..409c89924 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export.py
@@ -1,10 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat(info@infinidat.com)
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module modifies exports on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat(info@infinidat.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
@@ -82,13 +87,13 @@ EXAMPLES = r'''
client_list:
- client: 192.168.0.2
access: RW
- no_root_squash: True
+ no_root_squash: true
- client: 192.168.0.100
access: RO
- no_root_squash: False
+ no_root_squash: false
- client: 192.168.0.10-192.168.0.20
access: RO
- no_root_squash: False
+ no_root_squash: false
system: ibox001
user: admin
password: secret
@@ -98,8 +103,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -110,23 +113,22 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
merge_two_dicts,
)
-MUNCH_IMP_ERR = None
+HAS_MUNCH = True
try:
from munch import unmunchify
- HAS_MUNCH = True
except ImportError:
HAS_MUNCH = False
- MUNCH_IMPORT_ERROR = traceback.format_exc()
def transform(d):
+ """ Create a frozen set from a normal set's items """
return frozenset(d.items())
def create_export(module, export, filesystem, system):
- """ Create new filesystem or update existing one"""
+ """ Create new export """
if export:
- raise AssertionError("Export {0} already exists".format(export.get_name()))
+ raise AssertionError(f"Export {export.get_name()} already exists")
changed = False
name = module.params['name']
@@ -141,14 +143,13 @@ def create_export(module, export, filesystem, system):
@api_wrapper
-def update_export(module, export, filesystem, system):
- """ Create new filesystem or update existing one"""
+def update_export(module, export):
+ """ Update existing export """
if not export:
- raise AssertionError("Export {0} does not exist and cannot be updated".format(export.get_name()))
+ raise AssertionError(f"Export {export.get_name()} does not exist and cannot be updated")
changed = False
- name = module.params['name']
client_list = module.params['client_list']
if client_list:
@@ -164,21 +165,15 @@ def update_export(module, export, filesystem, system):
@api_wrapper
def delete_export(module, export):
- """ Delete file system"""
+ """ Delete export """
if not module.check_mode:
export.delete()
changed = True
return changed
-def get_sys_exp_fs(module):
- system = get_system(module)
- filesystem = get_filesystem(module, system)
- export = get_export(module, system)
- return (system, export, filesystem)
-
-
def get_export_fields(export):
+ """ Return export fields dict """
fields = export.get_fields() # from_cache=True, raw_value=True)
export_id = fields.get('id', None)
permissions = fields.get('permissions', None)
@@ -192,15 +187,13 @@ def get_export_fields(export):
def handle_stat(module):
- """
- Gather stats on export and return. Changed is always False.
- """
- system, export, filesystem = get_sys_exp_fs(module)
+ """ Gather stats on export and return. Changed is always False. """
+ name = module.params['name']
+ filesystem_name = module.params['filesystem']
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
- module.fail_json(msg='Export "{0}" of file system "{1}" not found'.format(
- module.params['name'],
- module.params['filesystem'],
- ))
+ module.fail_json(msg=f"Export '{name}' of file system '{filesystem_name}' not found")
field_dict = get_export_fields(export)
result = dict(
@@ -212,30 +205,38 @@ def handle_stat(module):
def handle_present(module):
- system, export, filesystem = get_sys_exp_fs(module)
+ """ Handle present state """
+ system = get_system(module)
+ filesystem = get_filesystem(module, system)
+ export = get_export(module, system)
+ filesystem_name = module.params['filesystem']
if not filesystem:
- module.fail_json(msg='File system {0} not found'.format(module.params['filesystem']))
+ module.fail_json(msg=f'File system {filesystem_name} not found')
elif not export:
changed = create_export(module, export, filesystem, system)
module.exit_json(changed=changed, msg="File system export created")
else:
- changed = update_export(module, export, filesystem, system)
+ changed = update_export(module, export)
module.exit_json(changed=changed, msg="File system export updated")
def handle_absent(module):
- system, export, filesystem = get_sys_exp_fs(module)
+ """ Handle absent state """
+ system = get_system(module)
+ export = get_export(module, system)
+ filesystem_name = module.params['filesystem']
if not export:
changed = False
- msg = "Export of {0} already absent".format(module.params['filesystem'])
+        msg = f"Export of {filesystem_name} already absent"
module.exit_json(changed=changed, msg=msg)
else:
changed = delete_export(module, export)
- msg = "Export of {0} deleted".format(module.params['filesystem'])
+ msg = f"Export of {filesystem_name} deleted"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Execute states """
state = module.params['state']
try:
if state == 'stat':
@@ -245,13 +246,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
index d35705787..d1889511f 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_export_client.py
@@ -1,10 +1,15 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+""" Manage Infinibox export clients """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position, wrong-import-order
+
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
@@ -58,7 +63,7 @@ EXAMPLES = r'''
infini_export_client:
client: 10.0.0.1
access_mode: RW
- no_root_squash: yes
+ no_root_squash: true
export: /data
state: present # Default
user: admin
@@ -69,7 +74,7 @@ EXAMPLES = r'''
infini_export_client:
client: "{{ item }}"
access_mode: RO
- no_root_squash: no
+ no_root_squash: false
export: /data
user: admin
password: secret
@@ -94,7 +99,7 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
merge_two_dicts,
)
-MUNCH_IMP_ERR = None
+MUNCH_IMPORT_ERROR = None
try:
from munch import Munch, unmunchify
HAS_MUNCH = True
@@ -145,7 +150,7 @@ def update_client(module, export):
@api_wrapper
def delete_client(module, export):
- """Update export client list"""
+ """delete export client from client list"""
if export is None and module.params['state'] == 'absent':
module.exit_json(changed=False)
@@ -168,13 +173,8 @@ def delete_client(module, export):
return changed
-def get_sys_exp(module):
- system = get_system(module)
- export = get_export(module, system)
- return (system, export)
-
-
def get_export_client_fields(export, client_name):
+ """ Get export client fields """
fields = export.get_fields() # from_cache=True, raw_value=True)
permissions = fields.get('permissions', None)
for munched_perm in permissions:
@@ -185,13 +185,15 @@ def get_export_client_fields(export, client_name):
no_root_squash=perm['no_root_squash'],
)
return field_dict
- raise AssertionError("No client {0} match to exports found".format(client_name))
+ raise AssertionError(f"No client {client_name} match to exports found")
def handle_stat(module):
- system, export = get_sys_exp(module)
+ """ Execute the stat state """
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
- module.fail_json(msg='Export {0} not found'.format(module.params['export']))
+ module.fail_json(msg=f"Export {module.params['export']} not found")
client_name = module.params['client']
field_dict = get_export_client_fields(export, client_name)
result = dict(
@@ -203,9 +205,11 @@ def handle_stat(module):
def handle_present(module):
- system, export = get_sys_exp(module)
+ """ Execute the present state """
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
- msg = 'Export {0} not found'.format(module.params['export'])
+ msg = f"Export {module.params['export']} not found"
module.fail_json(msg=msg)
changed = update_client(module, export)
@@ -214,7 +218,9 @@ def handle_present(module):
def handle_absent(module):
- system, export = get_sys_exp(module)
+ """ Execute the absent state """
+ system = get_system(module)
+ export = get_export(module, system)
if not export:
changed = False
msg = "Export client already absent"
@@ -226,6 +232,7 @@ def handle_absent(module):
def execute_state(module):
+ """ Execute a state """
state = module.params['state']
try:
if state == 'stat':
@@ -235,13 +242,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py
new file mode 100644
index 000000000..f64808af9
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fibre_channel_switch.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" Manage switch names on Infinibox """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_fibre_channel_switch
+version_added: 2.16.0
+short_description: Manage Infinibox FC switch names
+description:
+ - This module renames FC switch names (rename state) or shows information about FC switches (stat state)
+author: David Ohlemacher (@ohlemacher)
+options:
+ switch_name:
+ description:
+ - Current name of an existing fibre channel switch.
+ type: str
+ required: true
+ new_switch_name:
+ description:
+ - New name for an existing fibre channel switch.
+ type: str
+ required: false
+ state:
+ description:
+ - Rename an FC switch name, when using state rename.
+ - States present and absent are not implemented.
+ - State stat shows the existing FC switch details.
+ type: str
+ required: false
+ default: rename
+ choices: [ "stat", "rename" ]
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Rename fibre channel switch
+ infini_fibre_channel:
+ switch_name: VSAN 100
+ state: rename
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Get information about fibre channel switch
+ infini_fibre_channel:
+ switch_name: VSAN 2000
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ merge_two_dicts,
+ get_system,
+ infinibox_argument_spec,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+def find_switch_by_name(module):
+ """ Find switch by name """
+ switch = module.params['switch_name']
+ path = f"fc/switches?name={switch}"
+ system = get_system(module)
+ try:
+ switch_result = system.api.get(path=path).get_result()
+ if not switch_result:
+ msg = f"Cannot find switch {switch}"
+ module.exit_json(msg=msg)
+ except APICommandFailed as err:
+ msg = f"Cannot find switch {switch}: {err}"
+ module.exit_json(msg=msg)
+ return switch_result[0]
+
+
+def handle_stat(module):
+    """ Handle stat state """
+    switch_name = module.params['switch_name']
+    # find_switch_by_name() exits the module itself when the switch is missing
+    switch_result = find_switch_by_name(module)
+    result = dict(
+        changed=False,
+        msg=f"Switch stat {switch_name} found"
+    )
+    result = merge_two_dicts(result, switch_result)
+    module.exit_json(**result)
+
+
+def handle_rename(module):
+ """ Handle rename state """
+ switch_name = module.params['switch_name']
+ new_switch_name = module.params['new_switch_name']
+
+ switch_result = find_switch_by_name(module)
+ switch_id = switch_result['id']
+
+ path = f"fc/switches/{switch_id}"
+ data = {
+ "name": new_switch_name,
+ }
+ try:
+ system = get_system(module)
+ rename_result = system.api.put(path=path, data=data).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot rename fc switch {switch_name}: {err}"
+ module.exit_json(msg=msg)
+
+ result = dict(
+ changed=True,
+ msg=f"FC switch renamed from {switch_name} to {new_switch_name}"
+ )
+ result = merge_two_dicts(result, rename_result)
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "rename":
+ handle_rename(module)
+ else:
+ module.exit_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ new_switch_name = module.params["new_switch_name"]
+ state = module.params["state"]
+
+ if state in ["rename"]:
+ if not new_switch_name:
+ msg = "New switch name parameter must be provided"
+ module.exit_json(msg=msg)
+
+
+def main():
+    """ Main """
+    argument_spec = infinibox_argument_spec()
+    argument_spec.update(
+        dict(
+            switch_name=dict(required=True, type="str"),
+            new_switch_name=dict(required=False, type="str"),
+            state=dict(default="rename", choices=["stat", "rename"]),
+        )
+    )
+
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    # Validate option combinations before dispatching the requested state
+    check_options(module)
+    execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
index f9cd2bd56..d4d75a07a 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_fs.py
@@ -1,27 +1,77 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module manages file systems on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: infini_fs
-version_added: '2.3.0'
+version_added: 2.3.0
short_description: Create, Delete or Modify filesystems on Infinibox
description:
- This module creates, deletes or modifies filesystems on Infinibox.
author: David Ohlemacher (@ohlemacher)
options:
+ fs_type:
+ description:
+ - Specifies the file system type, regular or snapshot.
+ type: str
+ required: false
+ default: master
+ choices: [ "master", "snapshot" ]
name:
description:
- File system name.
+ required: false
+ type: str
+ parent_fs_name:
+ description:
+ - Specify a fs name. This is the fs parent for creating a snapshot. Required if fs_type is snapshot.
+ type: str
+ required: false
+ pool:
+ description:
+ - Pool that will host file system.
required: true
type: str
+ restore_fs_from_snapshot:
+ description:
+ - Specify true to restore a file system (parent_fs_name) from an existing snapshot specified by the name field.
+ - State must be set to present and fs_type must be 'snapshot'.
+ type: bool
+ required: false
+ default: false
+ serial:
+ description:
+ - Serial number matching an existing file system.
+ required: false
+ type: str
+ size:
+ description:
+ - File system size in MB, GB or TB units. See examples.
+ required: false
+ type: str
+ snapshot_lock_expires_at:
+ description:
+ - This will cause a snapshot to be locked at the specified date-time.
+ Uses python's datetime format YYYY-mm-dd HH:MM:SS.ffffff, e.g. 2020-02-13 16:21:59.699700
+ type: str
+ required: false
+ snapshot_lock_only:
+ description:
+ - This will lock an existing snapshot but will suppress refreshing the snapshot.
+ type: bool
+ required: false
+ default: false
state:
description:
- Creates/Modifies file system when present or removes when absent.
@@ -35,63 +85,96 @@ options:
required: false
default: true
type: bool
- pool:
+ write_protected:
description:
- - Pool that will host file system.
- required: true
+ - Specifies if the file system should be write protected. Default will be True for snapshots, False for master file systems.
type: str
- size:
- description:
- - File system size in MB, GB or TB units. See examples.
required: false
- type: str
+ default: "Default"
+ choices: ["Default", "True", "False"]
extends_documentation_fragment:
- infinibox
requirements:
- capacity
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create new file system named foo under pool named bar
infini_fs:
name: foo
- size: 1TB
+ size: 1GB
pool: bar
thin_provision: true
state: present
user: admin
password: secret
system: ibox001
-'''
+- name: Create snapshot named foo_snap from fs named foo
+ infini_fs:
+ name: foo_snap
+ pool: bar
+ fs_type: snapshot
+ parent_fs_name: foo
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Stat snapshot, also a fs, named foo_snap
+ infini_fs:
+ name: foo_snap
+ pool: bar
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+- name: Remove snapshot, also a fs, named foo_snap
+ infini_fs:
+ name: foo_snap
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+"""
# RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
+HAS_INFINISDK = True
try:
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
- HAS_INFINISDK,
api_wrapper,
- infinibox_argument_spec,
+ check_snapshot_lock_options,
+ get_filesystem,
+ get_fs_by_sn,
get_pool,
get_system,
- get_filesystem
+ infinibox_argument_spec,
+ manage_snapshot_locks,
)
except ModuleNotFoundError:
from infinibox import ( # Used when hacking
- HAS_INFINISDK,
api_wrapper,
- infinibox_argument_spec,
+ check_snapshot_lock_options,
+ get_filesystem,
get_pool,
get_system,
- get_filesystem
+ infinibox_argument_spec,
+ manage_snapshot_locks,
)
+except ImportError:
+ HAS_INFINISDK = False
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+ from infinisdk.core.exceptions import ObjectNotFound
+except ImportError:
+ HAS_INFINISDK = False
CAPACITY_IMP_ERR = None
try:
from capacity import KiB, Capacity
+
HAS_CAPACITY = True
except ImportError:
HAS_CAPACITY = False
@@ -99,152 +182,414 @@ except ImportError:
@api_wrapper
def create_filesystem(module, system):
- """Create Filesystem"""
- changed = True
+ """ Create Filesystem """
+ changed = False
if not module.check_mode:
- if module.params['thin_provision']:
- provisioning = 'THIN'
+ if module.params["thin_provision"]:
+ provisioning = "THIN"
else:
- provisioning = 'THICK'
+ provisioning = "THICK"
+
filesystem = system.filesystems.create(
- name=module.params['name'],
- pool=get_pool(module, system),
+ name=module.params["name"],
provtype=provisioning,
+ pool=get_pool(module, system),
)
- if module.params['size']:
- size = Capacity(module.params['size']).roundup(64 * KiB)
+ if module.params["size"]:
+ size = Capacity(module.params["size"]).roundup(64 * KiB)
filesystem.update_size(size)
+
+ is_write_prot = filesystem.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ filesystem.update_field("write_protected", desired_is_write_prot)
+ changed = True
return changed
@api_wrapper
def update_filesystem(module, filesystem):
- """Update Filesystem"""
+ """ Update Filesystem """
changed = False
- if module.params['size']:
- size = Capacity(module.params['size']).roundup(64 * KiB)
+
+ if module.check_mode:
+ return changed
+
+ if module.params["size"]:
+ size = Capacity(module.params["size"]).roundup(64 * KiB)
if filesystem.get_size() != size:
- if not module.check_mode:
- filesystem.update_size(size)
+ filesystem.update_size(size)
changed = True
+ if module.params["thin_provision"] is not None:
provisioning = str(filesystem.get_provisioning())
- if provisioning == 'THICK' and module.params['thin_provision']:
- if not module.check_mode:
- filesystem.update_provisioning('THIN')
+ if provisioning == "THICK" and module.params["thin_provision"]:
+ filesystem.update_provisioning("THIN")
changed = True
- if provisioning == 'THIN' and not module.params['thin_provision']:
- if not module.check_mode:
- filesystem.update_provisioning('THICK')
+ if provisioning == "THIN" and not module.params["thin_provision"]:
+ filesystem.update_provisioning("THICK")
changed = True
+
+ is_write_prot = filesystem.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ filesystem.update_field("write_protected", desired_is_write_prot)
+ changed = True
+
return changed
@api_wrapper
def delete_filesystem(module, filesystem):
- """ Delete Filesystem"""
+ """ Delete Filesystem """
+ changed = False
if not module.check_mode:
filesystem.delete()
- module.exit_json(changed=True)
+ changed = True
+ return changed
-def get_sys_pool_fs(module):
- system = get_system(module)
- pool = get_pool(module, system)
- filesystem = get_filesystem(module, system)
- return (system, pool, filesystem)
+@api_wrapper
+def create_fs_snapshot(module, system):
+ """ Create Snapshot from parent fs """
+ snapshot_name = module.params["name"]
+ parent_fs_name = module.params["parent_fs_name"]
+ changed = False
+ if not module.check_mode:
+ try:
+ parent_fs = system.filesystems.get(name=parent_fs_name)
+ except ObjectNotFound:
+ msg = f"Cannot create snapshot {snapshot_name}. Parent file system {parent_fs_name} not found"
+ module.fail_json(msg=msg)
+ if not parent_fs:
+ msg = f"Cannot find new snapshot's parent file system named {parent_fs_name}"
+ module.fail_json(msg=msg)
+ if not module.check_mode:
+ if module.params["snapshot_lock_only"]:
+ msg = "Snapshot does not exist. Cannot comply with 'snapshot_lock_only: true'."
+ module.fail_json(msg=msg)
+ check_snapshot_lock_options(module)
+ snapshot = parent_fs.create_snapshot(name=snapshot_name)
+
+ is_write_prot = snapshot.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ snapshot.update_field("write_protected", desired_is_write_prot)
+
+ manage_snapshot_locks(module, snapshot)
+ changed = True
+ return changed
+
+
+@api_wrapper
+def update_fs_snapshot(module, snapshot):
+ """ Update/refresh fs snapshot. May also lock it. """
+ refresh_changed = False
+ lock_changed = False
+ if not module.check_mode:
+ if not module.params["snapshot_lock_only"]:
+ snap_is_locked = snapshot.get_lock_state() == "LOCKED"
+ if not snap_is_locked:
+ if not module.check_mode:
+ snapshot.refresh_snapshot()
+ refresh_changed = True
+ else:
+ msg = "File system snapshot is locked and may not be refreshed"
+ module.fail_json(msg=msg)
+
+ check_snapshot_lock_options(module)
+ lock_changed = manage_snapshot_locks(module, snapshot)
+
+ if module.params["write_protected"] is not None:
+ is_write_prot = snapshot.is_write_protected()
+ desired_is_write_prot = module.params["write_protected"]
+ if is_write_prot != desired_is_write_prot:
+ snapshot.update_field("write_protected", desired_is_write_prot)
+
+ return refresh_changed or lock_changed
+
+
+@api_wrapper
+def find_fs_id(module, system, fs_name):
+ """ Find the ID of this fs """
+ fs_url = f"filesystems?name={fs_name}&fields=id"
+ fs = system.api.get(path=fs_url)
+
+ result = fs.get_json()["result"]
+ if len(result) != 1:
+ module.fail_json(f"Cannot find a file ststem with name '{fs_name}'")
+
+ fs_id = result[0]["id"]
+ return fs_id
+
+
+@api_wrapper
+def restore_fs_from_snapshot(module, system):
+ """ Use snapshot to restore a file system """
+ changed = False
+ is_restoring = module.params["restore_fs_from_snapshot"]
+ fs_type = module.params["fs_type"]
+ snap_name = module.params["name"]
+ snap_id = find_fs_id(module, system, snap_name)
+ parent_fs_name = module.params["parent_fs_name"]
+ parent_fs_id = find_fs_id(module, system, parent_fs_name)
+
+ # Check params
+ if not is_restoring:
+ raise AssertionError("A programming error occurred. is_restoring is not True")
+ if fs_type != "snapshot":
+ module.exit_json(msg="Cannot restore a parent file system from snapshot unless the file system type is 'snapshot'")
+ if not parent_fs_name:
+ module.exit_json(msg="Cannot restore a parent file system from snapshot unless the parent file system name is specified")
+
+ if not module.check_mode:
+ restore_url = f"filesystems/{parent_fs_id}/restore?approved=true"
+ restore_data = {
+ "source_id": snap_id,
+ }
+ try:
+ system.api.post(path=restore_url, data=restore_data)
+ changed = True
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot restore file system {parent_fs_name} from snapshot {snap_name}: {str(err)}")
+ return changed
def handle_stat(module):
- system, pool, filesystem = get_sys_pool_fs(module)
- if not pool:
- module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
+ """ Handle the stat state """
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ filesystem = get_filesystem(module, system)
+ else:
+ filesystem = get_fs_by_sn(module, system)
+ fs_type = module.params["fs_type"]
+
+ if fs_type == "master":
+ if not pool:
+ module.fail_json(msg=f"Pool {module.params['pool']} not found")
if not filesystem:
- module.fail_json(msg='File system {0} not found'.format(module.params['name']))
+ module.fail_json(msg=f"File system {module.params['name']} not found")
fields = filesystem.get_fields() # from_cache=True, raw_value=True)
+
+ created_at = str(fields.get("created_at", None))
+ filesystem_id = fields.get("id", None)
+ filesystem_type = fields.get("type", None)
+ has_children = fields.get("has_children", None)
+ lock_expires_at = str(filesystem.get_lock_expires_at())
+ lock_state = filesystem.get_lock_state()
+ mapped = str(fields.get("mapped", None))
name = fields.get("name", None)
- used = fields.get('used_size', None)
- filesystem_id = fields.get('id', None)
- provisioning = fields.get('provisioning', None)
+ parent_id = fields.get("parent_id", None)
+ provisioning = fields.get("provisioning", None)
+ serial = fields.get("serial", None)
+ size = str(filesystem.get_size())
+ updated_at = str(fields.get("updated_at", None))
+ used = str(fields.get("used_size", None))
+ write_protected = fields.get("write_protected", None)
+ if filesystem_type == "SNAPSHOT":
+ msg = "File system snapshot stat found"
+ else:
+ msg = "File system stat found"
result = dict(
changed=False,
- name=name,
- size=str(filesystem.get_size()),
- used=str(used),
+ created_at=created_at,
filesystem_id=filesystem_id,
+ filesystem_type=filesystem_type,
+ has_children=has_children,
+ lock_state=lock_state,
+ lock_expires_at=lock_expires_at,
+ mapped=mapped,
+ msg=msg,
+ name=name,
+ parent_id=parent_id,
provisioning=provisioning,
- msg='File system stat found'
+ serial=serial,
+ size=size,
+ updated_at=updated_at,
+ used=used,
+ write_protected=write_protected,
)
module.exit_json(**result)
def handle_present(module):
- system, pool, filesystem = get_sys_pool_fs(module)
- if not pool:
- module.fail_json(msg='Pool {0} not found'.format(module.params['pool']))
- if not filesystem:
- changed = create_filesystem(module, system)
- module.exit_json(changed=changed, msg="File system created")
+ """ Handle the present state """
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ filesystem = get_filesystem(module, system)
else:
- changed = update_filesystem(module, filesystem)
- module.exit_json(changed=changed, msg="File system updated")
+ filesystem = get_fs_by_sn(module, system)
+ fs_type = module.params["fs_type"]
+ is_restoring = module.params["restore_fs_from_snapshot"]
+ if fs_type == "master":
+ if not pool:
+ module.fail_json(msg=f"Pool {module.params['pool']} not found")
+ if not filesystem:
+ changed = create_filesystem(module, system)
+ module.exit_json(changed=changed, msg="File system created")
+ else:
+ changed = update_filesystem(module, filesystem)
+ module.exit_json(changed=changed, msg="File system updated")
+ elif fs_type == "snapshot":
+ snapshot = filesystem
+ if is_restoring:
+ # Restore fs from snapshot
+ changed = restore_fs_from_snapshot(module, system)
+ snap_fs_name = module.params["name"]
+ parent_fs_name = module.params["parent_fs_name"]
+ msg = f"File system {parent_fs_name} restored from snapshot {snap_fs_name}"
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ if not snapshot:
+ changed = create_fs_snapshot(module, system)
+ module.exit_json(changed=changed, msg="File system snapshot created")
+ else:
+ changed = update_fs_snapshot(module, filesystem)
+ module.exit_json(changed=changed, msg="File system snapshot updated")
def handle_absent(module):
- system, pool, filesystem = get_sys_pool_fs(module)
+ """ Handle the absent state """
+ system = get_system(module)
+ pool = get_pool(module, system)
+ if module.params["name"]:
+ filesystem = get_filesystem(module, system)
+ else:
+ filesystem = get_fs_by_sn(module, system)
+
+ if filesystem and filesystem.get_lock_state() == "LOCKED":
+ msg = "Cannot delete snapshot. Locked."
+ module.fail_json(changed=False, msg=msg)
+
if not pool or not filesystem:
module.exit_json(changed=False, msg="File system already absent")
- else:
+
+ existing_fs_type = filesystem.get_type()
+
+ if existing_fs_type == "MASTER":
changed = delete_filesystem(module, filesystem)
module.exit_json(changed=changed, msg="File system removed")
+ elif existing_fs_type == "SNAPSHOT":
+ snapshot = filesystem
+ changed = delete_filesystem(module, snapshot)
+ module.exit_json(changed=changed, msg="Snapshot removed")
+ else:
+ module.fail_json(msg="A programming error has occured")
def execute_state(module):
- state = module.params['state']
+ """ Execute states """
+ state = module.params["state"]
try:
- if state == 'stat':
+ if state == "stat":
handle_stat(module)
- elif state == 'present':
+ elif state == "present":
handle_present(module)
- elif state == 'absent':
+ elif state == "absent":
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
finally:
system = get_system(module)
system.logout()
+def check_options(module):
+ """Verify module options are sane"""
+ name = module.params["name"]
+ serial = module.params["serial"]
+ state = module.params["state"]
+ size = module.params["size"]
+ pool = module.params["pool"]
+ fs_type = module.params["fs_type"]
+ parent_fs_name = module.params["parent_fs_name"]
+
+ if state == "stat":
+ if not name and not serial:
+ msg = "Name or serial parameter must be provided"
+ module.fail_json(msg=msg)
+ if state in ["present", "absent"]:
+ if not name:
+ msg = "Name parameter must be provided"
+ module.fail_json(msg=msg)
+
+ if state == "present":
+ if fs_type == "master":
+ if parent_fs_name:
+ msg = "parent_fs_name should not be specified "
+ msg += "if fs_type is 'master'. Used for snapshots only."
+ module.fail_json(msg=msg)
+ if not size:
+ msg = "Size is required to create a master file system"
+ module.fail_json(msg=msg)
+ if not pool:
+ msg = "For state 'present', pool is required"
+ module.fail_json(msg=msg)
+ elif fs_type == "snapshot":
+ if size:
+ msg = "Size should not be specified "
+ msg += "for fs_type snapshot"
+ module.fail_json(msg=msg)
+ if not parent_fs_name:
+ msg = "For state 'present' and fs_type 'snapshot', "
+ msg += "parent_fs_name is required"
+ module.fail_json(msg=msg)
+ else:
+ msg = "A programming error has occurred"
+ module.fail_json(msg=msg)
+
+
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name=dict(required=True),
- state=dict(default='present', choices=['stat', 'present', 'absent']),
+ fs_type=dict(choices=["master", "snapshot"], default="master"),
+ name=dict(required=False, default=None),
+ parent_fs_name=dict(default=None, required=False),
pool=dict(required=True),
+ restore_fs_from_snapshot=dict(default=False, type="bool"),
+ serial=dict(required=False, default=None),
size=dict(),
- thin_provision=dict(type=bool, default=True),
+ snapshot_lock_expires_at=dict(),
+ snapshot_lock_only=dict(required=False, type="bool", default=False),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ thin_provision=dict(default=True, type="bool"),
+ write_protected=dict(choices=["True", "False", "Default"], default="Default"),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
+ if module.params["write_protected"] == "Default":
+ if module.params["fs_type"] == "master": # Use default for master fs
+ module.params["write_protected"] = False
+ else: # Use default for snapshot
+ module.params["write_protected"] = True
+ else:
+ module.params["write_protected"] = module.params["write_protected"] == "True"
+
if not HAS_INFINISDK:
- module.fail_json(msg=missing_required_lib('infinisdk'))
+ module.fail_json(msg=missing_required_lib("infinisdk"))
if not HAS_CAPACITY:
- module.fail_json(msg=missing_required_lib('capacity'))
+ module.fail_json(msg=missing_required_lib("capacity"))
- if module.params['size']:
+ if module.params["size"]:
try:
- Capacity(module.params['size'])
- except Exception:
- module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
+ Capacity(module.params["size"])
+ except Exception: # pylint: disable=broad-exception-caught
+ module.fail_json(
+ msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units"
+ )
+ check_options(module)
execute_state(module)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
index 68d78546e..91eeab2ee 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_host.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position,multiple-statements
+
+""" Manage hosts on Infinibox """
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -11,7 +15,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_host
-version_added: '2.3.0'
+version_added: 2.3.0
short_description: Create, Delete or Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
@@ -20,10 +24,12 @@ options:
name:
description:
- Host Name
+ type: str
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
+ type: str
required: false
default: present
choices: [ "stat", "present", "absent" ]
@@ -44,9 +50,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
-from infi.dtypes.iqn import make_iscsi_name
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -60,22 +63,16 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
@api_wrapper
def create_host(module, system):
-
+ """ Create a host """
changed = True
-
if not module.check_mode:
- host = system.hosts.create(name=module.params['name'])
- return changed
-
-
-@api_wrapper
-def update_host(module, host):
- changed = False
+ system.hosts.create(name=module.params['name'])
return changed
@api_wrapper
def delete_host(module, host):
+ """ Delete a host """
changed = True
if not module.check_mode:
# May raise APICommandFailed if mapped, etc.
@@ -83,13 +80,8 @@ def delete_host(module, host):
return changed
-def get_sys_host(module):
- system = get_system(module)
- host = get_host(module, system)
- return (system, host)
-
-
def get_host_fields(host):
+ """ Get host fields """
fields = host.get_fields(from_cache=True, raw_value=True)
created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))
field_dict = dict(
@@ -117,45 +109,52 @@ def get_host_fields(host):
def handle_stat(module):
- system, host = get_sys_host(module)
+ """ Handle the stat state """
+ system = get_system(module)
+ host = get_host(module, system)
host_name = module.params["name"]
if not host:
- module.fail_json(msg='Host {0} not found'.format(host_name))
+ module.fail_json(msg=f'Host {host_name} not found')
field_dict = get_host_fields(host)
result = dict(
changed=False,
- msg='Host stat found'
+ msg=f'Host {host_name} stat found'
)
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
def handle_present(module):
- system, host = get_sys_host(module)
+ """ Handle the present state """
+ system = get_system(module)
+ host = get_host(module, system)
host_name = module.params["name"]
if not host:
changed = create_host(module, system)
- msg = 'Host {0} created'.format(host_name)
+ msg = f'Host {host_name} created'
module.exit_json(changed=changed, msg=msg)
else:
- changed = update_host(module, host)
- msg = 'Host {0} updated'.format(host_name)
+ changed = False
+ msg = f'Host {host_name} exists and does not need to be updated'
module.exit_json(changed=changed, msg=msg)
def handle_absent(module):
- system, host = get_sys_host(module)
+ """ Handle the absent state """
+ system = get_system(module)
+ host = get_host(module, system)
host_name = module.params["name"]
if not host:
- msg = "Host {0} already absent".format(host_name)
+ msg = f"Host {host_name} already absent"
module.exit_json(changed=False, msg=msg)
else:
changed = delete_host(module, host)
- msg = "Host {0} removed".format(host_name)
+ msg = f"Host {host_name} removed"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Execute a state """
state = module.params['state']
try:
if state == 'stat':
@@ -165,13 +164,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
index e3757e021..1111930a3 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_map.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies mappings on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -11,7 +15,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_map
-version_added: '2.9.0'
+version_added: 2.9.0
short_description: Create and Delete mapping of a volume to a host or cluster on Infinibox
description:
- This module creates or deletes mappings of volumes to hosts or clusters
@@ -24,10 +28,12 @@ options:
host:
description:
- Host Name
+ type: str
required: false
cluster:
description:
- Cluster Name
+ type: str
required: false
state:
description:
@@ -40,10 +46,12 @@ options:
volume:
description:
- Volume name to map to the host.
+ type: str
required: true
lun:
description:
- Volume lun.
+ type: int
extends_documentation_fragment:
- infinibox
'''
@@ -86,15 +94,8 @@ EXAMPLES = r'''
password: secret
'''
-
# RETURN = r''' # '''
-import traceback
-# import sh
-
-# rescan_scsi = sh.Command("rescan-scsi-bus.sh")
-# rescan_scsi_remove = rescan_scsi.bake("--remove")
-
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
@@ -102,7 +103,6 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
api_wrapper,
get_cluster,
get_host,
- get_pool,
get_system,
get_volume,
infinibox_argument_spec,
@@ -110,39 +110,31 @@ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox impo
)
try:
- from infinisdk.core.exceptions import APICommandFailed, ObjectNotFound
+ from infinisdk.core.exceptions import APICommandFailed
except ImportError:
pass # Handled by HAS_INFINISDK from module_utils
def vol_is_mapped_to_host(volume, host):
- volume_fields = volume.get_fields()
- volume_id = volume_fields.get('id')
+ """ Return a bool showing if a vol is mapped to a host """
host_luns = host.get_luns()
- # print('volume id: {0}'.format(volume_id))
- # print('host luns: {0}'.format(str(host_luns)))
for lun in host_luns:
if lun.volume == volume:
- # print('found mapped volume: {0}'.format(volume))
return True
return False
def vol_is_mapped_to_cluster(volume, cluster):
- volume_fields = volume.get_fields()
- volume_id = volume_fields.get('id')
+ """ Return a bool showing if a vol is mapped to a cluster """
cluster_luns = cluster.get_luns()
- # print('volume id: {0}'.format(volume_id))
- # print('host luns: {0}'.format(str(host_luns)))
-
for lun in cluster_luns:
if lun.volume == volume:
- # print('found mapped volume: {0}'.format(volume))
return True
return False
def find_host_lun_use(module, host, volume):
+ """ Return a dict showing if a host lun matches a volume. """
check_result = {'lun_used': False, 'lun_volume_matches': False}
desired_lun = module.params['lun']
@@ -158,12 +150,13 @@ def find_host_lun_use(module, host, volume):
def find_cluster_lun_use(module, cluster, volume):
+ """ Return a dict showing if a cluster lun matches a volume. """
check_result = {'lun_used': False, 'lun_volume_matches': False}
desired_lun = module.params['lun']
if desired_lun:
for cluster_lun in cluster.get_luns():
- if desired_lun == cluster.lun:
+ if desired_lun == cluster_lun:
if cluster.volume == volume:
check_result = {'lun_used': True, 'lun_volume_matches': True}
else:
@@ -173,6 +166,7 @@ def find_cluster_lun_use(module, cluster, volume):
def find_host_lun(host, volume):
+ """ Find a hosts lun """
found_lun = None
luns = host.get_luns()
@@ -183,6 +177,7 @@ def find_host_lun(host, volume):
def find_cluster_lun(cluster, volume):
+ """ Find a cluster's LUN """
found_lun = None
luns = cluster.get_luns()
@@ -194,12 +189,8 @@ def find_cluster_lun(cluster, volume):
@api_wrapper
def create_mapping(module, system):
- """
- Create mapping of volume to host or cluster. If already mapped, exit_json with changed False.
- """
+ """ Create mapping of volume to host or cluster. If already mapped, exit_json with changed False. """
- host_name = module.params['host']
- cluster_name = module.params['cluster']
host = get_host(module, system)
cluster = get_cluster(module, system)
@@ -221,20 +212,18 @@ def create_mapping(module, system):
@api_wrapper
def create_mapping_to_cluster(module, system):
- """
- Create mapping of volume to cluster. If already mapped, exit_json with changed False.
- """
+ """ Create mapping of volume to cluster. If already mapped, exit_json with changed False. """
changed = False
cluster = get_cluster(module, system)
volume = get_volume(module, system)
+ volume_name = module.params['volume']
+ cluster_name = module.params['cluster']
+ lun_name = module.params['lun']
lun_use = find_cluster_lun_use(module, cluster, volume)
if lun_use['lun_used']:
- msg = "Cannot create mapping of volume '{0}' to cluster '{1}' using lun '{2}'. Lun in use.".format(
- volume.get_name(),
- cluster.get_name(),
- module.params['lun'])
+ msg = f"Cannot create mapping of volume '{volume_name}' to cluster '{cluster_name}' using lun '{lun_name}'. Lun in use."
module.fail_json(msg=msg)
try:
@@ -244,30 +233,26 @@ def create_mapping_to_cluster(module, system):
changed = True
except APICommandFailed as err:
if "is already mapped" not in str(err):
- module.fail_json('Cannot map volume {0} to cluster {1}: {2}. Already mapped.'.format(
- module.params['volume'],
- module.params['cluster'],
- str(err)))
+ msg = f"Cannot map volume '{volume_name}' to cluster '{cluster_name}': {str(err)}. Already mapped."
+ module.fail_json(msg=msg)
return changed
@api_wrapper
def create_mapping_to_host(module, system):
- """
- Create mapping of volume to host. If already mapped, exit_json with changed False.
- """
+ """ Create mapping of volume to host. If already mapped, exit_json with changed False. """
changed = False
host = system.hosts.get(name=module.params['host'])
volume = get_volume(module, system)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
+ lun_name = module.params['lun']
lun_use = find_host_lun_use(module, host, volume)
if lun_use['lun_used']:
- msg = "Cannot create mapping of volume '{0}' to host '{1}' using lun '{2}'. Lun in use.".format(
- volume.get_name(),
- host.get_name(),
- module.params['lun'])
+ msg = f"Cannot create mapping of volume '{volume_name}' to host '{host_name}' using lun '{lun_name}'. Lun in use."
module.fail_json(msg=msg)
try:
@@ -277,35 +262,29 @@ def create_mapping_to_host(module, system):
changed = True
except APICommandFailed as err:
if "is already mapped" not in str(err):
- module.fail_json('Cannot map volume {0} to host {1}: {2}. Already mapped.'.format(
- module.params['volume'],
- module.params['host'],
- str(err)))
+ msg = f"Cannot map volume '{host_name}' to host '{host_name}': {str(err)}. Already mapped."
+ module.fail_json(msg=msg)
return changed
@api_wrapper
def update_mapping_to_host(module, system):
+ """ Update a mapping to a host """
host = get_host(module, system)
volume = get_volume(module, system)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
desired_lun = module.params['lun']
if not vol_is_mapped_to_host(volume, host):
- msg = "Volume {0} is not mapped to host {1}".format(
- volume.get_name(),
- host.get_name(),
- )
+ msg = f"Volume '{volume_name}' is not mapped to host '{host_name}'"
module.fail_json(msg=msg)
if desired_lun:
found_lun = find_host_lun(host, volume)
if found_lun != desired_lun:
- msg = "Cannot change the lun from '{0}' to '{1}' for existing mapping of volume '{2}' to host '{3}'".format(
- found_lun,
- desired_lun,
- volume.get_name(),
- host.get_name())
+ msg = f"Cannot change the lun from '{found_lun}' to '{desired_lun}' for existing mapping of volume '{volume_name}' to host '{host_name}'"
module.fail_json(msg=msg)
changed = False
@@ -314,25 +293,21 @@ def update_mapping_to_host(module, system):
@api_wrapper
def update_mapping_to_cluster(module, system):
+ """ Update a mapping to a cluster """
cluster = get_cluster(module, system)
volume = get_volume(module, system)
desired_lun = module.params['lun']
+ volume_name = module.params['volume']
+ cluster_name = module.params['cluster']
if not vol_is_mapped_to_cluster(volume, cluster):
- msg = "Volume {0} is not mapped to cluster {1}".format(
- volume.get_name(),
- cluster.get_name(),
- )
+ msg = f"Volume {volume_name} is not mapped to cluster {cluster_name}"
module.fail_json(msg=msg)
if desired_lun:
found_lun = find_cluster_lun(cluster, volume)
if found_lun != desired_lun:
- msg = "Cannot change the lun from '{0}' to '{1}' for existing mapping of volume '{2}' to cluster '{3}'".format(
- found_lun,
- desired_lun,
- volume.get_name(),
- cluster.get_name())
+ msg = f"Cannot change the lun from '{found_lun}' to '{desired_lun}' for existing mapping of volume '{volume_name}' to cluster '{cluster_name}'"
module.fail_json(msg=msg)
changed = False
@@ -341,6 +316,7 @@ def update_mapping_to_cluster(module, system):
@api_wrapper
def delete_mapping(module, system):
+ """ Delete a mapping """
host = get_host(module, system)
cluster = get_cluster(module, system)
if host:
@@ -373,34 +349,22 @@ def delete_mapping_to_host(module, system):
if not module.check_mode:
volume = get_volume(module, system)
host = get_host(module, system)
+ volume_name = module.params['volume']
+ host_name = module.params['host']
if volume and host:
try:
existing_lun = find_host_lun(host, volume)
host.unmap_volume(volume)
changed = True
- msg = "Volume '{0}' was unmapped from host '{1}' freeing lun '{2}'".format(
- module.params['volume'],
- module.params['host'],
- existing_lun,
- )
-
+ msg = f"Volume '{volume_name}' was unmapped from host '{host_name}' freeing lun '{existing_lun}'"
except KeyError as err:
if 'has no logical units' not in str(err):
- module.fail_json('Cannot unmap volume {0} from host {1}: {2}'.format(
- module.params['volume'],
- module.params['host'],
- str(err)))
+ module.fail_json(f"Cannot unmap volume '{volume_name}' from host '{host_name}': {str(err)}")
else:
- msg = "Volume {0} was not mapped to host {1} and so unmapping was not executed".format(
- module.params['volume'],
- module.params['host'],
- )
+ msg = f"Volume '{volume_name}' was not mapped to host '{host_name}' and so unmapping was not executed"
else:
- msg = "Either volume {0} or host {1} does not exist. Unmapping was not executed".format(
- module.params['volume'],
- module.params['host'],
- )
+ msg = f"Either volume '{volume_name}' or host '{host_name}' does not exist. Unmapping was not executed"
else: # check_mode
changed = True
@@ -421,55 +385,31 @@ def delete_mapping_to_cluster(module, system):
if not module.check_mode:
volume = get_volume(module, system)
cluster = get_cluster(module, system)
+ volume_name = module.params['volume']
+ cluster_name = module.params['cluster']
if volume and cluster:
try:
existing_lun = find_cluster_lun(cluster, volume)
cluster.unmap_volume(volume)
changed = True
- msg = "Volume '{0}' was unmapped from cluster '{1}' freeing lun '{2}'".format(
- module.params['volume'],
- module.params['cluster'],
- existing_lun,
- )
+ msg = f"Volume '{volume_name}' was unmapped from cluster '{cluster_name}' freeing lun '{existing_lun}'"
except KeyError as err:
if 'has no logical units' not in str(err):
- module.fail_json('Cannot unmap volume {0} from cluster {1}: {2}'.format(
- module.params['volume'],
- module.params['cluster'],
- str(err)))
+ msg = f"Cannot unmap volume '{volume_name}' from cluster '{cluster_name}': {str(err)}"
+ module.fail_json(msg=msg)
else:
- msg = "Volume {0} was not mapped to cluster {1} and so unmapping was not executed".format(
- module.params['volume'],
- module.params['cluster'],
- )
+ msg = f"Volume '{volume_name}' was not mapped to cluster '{cluster_name}' and so unmapping was not executed"
else:
- msg = "Either volume {0} or cluster {1} does not exist. Unmapping was not executed".format(
- module.params['volume'],
- module.params['cluster'],
- )
+ msg = f"Either volume '{volume_name}' or cluster '{cluster_name}' does not exist. Unmapping was not executed"
else: # check_mode
changed = True
module.exit_json(msg=msg, changed=changed)
-def get_sys_vol_host_cluster(module):
- system = get_system(module)
- volume = get_volume(module, system)
- host = get_host(module, system)
- cluster = get_cluster(module, system)
- return (system, volume, host, cluster)
-
-
-def get_sys_vol_cluster(module):
- system = get_system(module)
- volume = get_volume(module, system)
- cluster = get_cluster(module, system)
- return (system, volume, cluster)
-
-
def get_mapping_fields(volume, host_or_cluster):
+ """ Get mapping fields """
luns = host_or_cluster.get_luns()
for lun in luns:
if volume.get_name() == lun.volume.get_name():
@@ -481,50 +421,56 @@ def get_mapping_fields(volume, host_or_cluster):
def handle_stat(module):
- system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ """ Return mapping stat """
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
volume_name = module.params['volume']
host_name = module.params['host']
- if not host_name:
- host_name = "not specified"
-
cluster_name = module.params['cluster']
- if not cluster_name:
- cluster_name = "not specified"
if not volume:
- module.fail_json(msg='Volume {0} not found'.format(volume_name))
+ module.fail_json(msg=f"Volume '{volume_name}' not found")
+
if not host and not cluster:
- module.fail_json(msg='Neither host [{0}] nor cluster [{1}] found'.format(host_name, cluster_name))
+ msg = f"Neither host '{host_name}' nor cluster '{cluster_name}' found"
+ module.fail_json(msg=msg)
+
if (not host or not vol_is_mapped_to_host(volume, host)) \
and (not cluster or not vol_is_mapped_to_cluster(volume, cluster)):
- msg = 'Volume {0} is mapped to neither host {1} nor cluster {2}'.format(volume_name, host_name, cluster_name)
+ if host_name:
+ msg = f"Volume '{volume_name}' is not mapped to host '{host_name}'"
+ elif cluster_name:
+ msg = f"Volume '{volume_name}' is not mapped to cluster '{cluster_name}'"
module.fail_json(msg=msg)
- if host:
+
+ if host and host_name:
found_lun = find_host_lun(host, volume)
field_dict = get_mapping_fields(volume, host)
if found_lun is not None:
- msg = 'Volume {0} is mapped to host {1} using lun {2}'.format(volume_name, host_name, found_lun),
+ msg = f"Volume '{volume_name}' is mapped to host '{host_name}' using lun '{found_lun}'"
result = dict(
changed=False,
volume_lun=found_lun,
msg=msg,
)
else:
- msg = 'Volume {0} is not mapped to host {1}'.format(volume_name, host_name)
+ msg = f"Volume '{volume_name}' is not mapped to host '{host_name}'"
module.fail_json(msg=msg)
- elif cluster:
+ elif cluster and cluster_name:
found_lun = find_cluster_lun(cluster, volume)
field_dict = get_mapping_fields(volume, cluster)
if found_lun is not None:
- msg = 'Volume {0} is mapped to cluster {1} using lun {2}'.format(volume_name, cluster_name, found_lun)
+ msg = f"Volume '{volume_name}' is mapped to cluster '{cluster_name}' using lun '{found_lun}'"
result = dict(
changed=False,
volume_lun=found_lun,
msg=msg,
)
else:
- msg = 'Volume {0} is not mapped to cluster {1}'.format(volume_name, cluster_name)
+ msg = f"Volume '{volume_name}' is not mapped to cluster '{cluster_name}'"
module.fail_json(msg=msg)
else:
msg = 'A programming error has occurred in handle_stat()'
@@ -534,50 +480,38 @@ def handle_stat(module):
def handle_present(module):
- system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ """ Create or update mapping """
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
volume_name = module.params['volume']
host_name = module.params['host']
cluster_name = module.params['cluster']
if not volume:
- module.fail_json(changed=False, msg='Volume {0} not found'.format(volume_name))
+ module.fail_json(changed=False, msg=f"Volume '{volume_name}' not found")
if not host and not cluster:
if not host_name:
host_name = "not specified"
if not cluster_name:
cluster_name = "not specified"
- module.fail_json(changed=False, msg='Neither host [{0}] nor cluster [{1}] found'.format(host_name, cluster_name))
+ module.fail_json(changed=False, msg=f"Neither host '{host_name}' nor cluster '{cluster_name}' found")
if host:
if not vol_is_mapped_to_host(volume, host):
changed = create_mapping(module, system)
- # TODO: Why is find_host_lun() returning None after creating the mapping?
- # host.get_luns() returns an empty list, why?
- # existing_lun = find_host_lun(host, volume)
- # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
- # volume.get_name(),
- # host.get_name(),
- # existing_lun,
- # )
- msg = "Volume '{0}' map to host '{1}' created".format(volume_name, host_name)
+ msg = f"Volume '{volume_name}' map to host '{host_name}' created"
else:
changed = update_mapping_to_host(module, system)
existing_lun = find_host_lun(host, volume)
- msg = "Volume '{0}' map to host '{1}' already exists using lun '{2}'".format(volume_name, host_name, existing_lun)
+ msg = f"Volume '{volume_name}' map to host '{host_name}' already exists using lun '{existing_lun}'"
elif cluster:
if not vol_is_mapped_to_cluster(volume, cluster):
changed = create_mapping(module, system)
- # TODO: Why is find_host_lun() returning None after creating the mapping?
- # host.get_luns() returns an empty list, why?
- # existing_lun = find_host_lun(host, volume)
- # msg = "Volume '{0}' map to host '{1}' created using lun '{2}'".format(
- # volume.get_name(),
- # host.get_name(),
- # existing_lun,
- # )
- msg = "Volume '{0}' map to cluster '{1}' created".format(volume_name, cluster_name)
+ msg = f"Volume '{volume_name}' map to cluster '{cluster_name}' created"
else:
changed = update_mapping_to_cluster(module, system)
existing_lun = find_cluster_lun(cluster, volume)
- msg = "Volume '{0}' map to cluster '{1}' already exists using lun '{2}'".format(volume_name, cluster_name, existing_lun)
+ msg = f"Volume '{volume_name}' map to cluster '{cluster_name}' already exists using lun '{existing_lun}'"
result = dict(
changed=changed,
@@ -587,18 +521,23 @@ def handle_present(module):
def handle_absent(module):
- system, volume, host, cluster = get_sys_vol_host_cluster(module)
+ """ Remove mapping """
+ system = get_system(module)
+ volume = get_volume(module, system)
+ host = get_host(module, system)
+ cluster = get_cluster(module, system)
volume_name = module.params['volume']
host_name = module.params['host']
cluster_name = module.params['cluster']
if not volume or (not host and not cluster):
- module.exit_json(changed=False, msg='Mapping of volume {0} to host {1} or cluster {2} already absent'.format(volume_name, host_name, cluster_name))
+ module.exit_json(changed=False, msg=f'Mapping of volume {volume_name} to host {host_name} or cluster {cluster_name} already absent')
else:
changed = delete_mapping(module, system)
module.exit_json(changed=changed, msg="Mapping removed")
def execute_state(module):
+ """Determine which state function to execute and do so"""
state = module.params['state']
try:
if state == 'stat':
@@ -608,14 +547,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def check_parameters(module):
- volume_name = module.params['volume']
+ """Verify module options are sane"""
host_name = module.params['host']
cluster_name = module.params['cluster']
if host_name and cluster_name:
@@ -628,17 +567,15 @@ def check_parameters(module):
def main():
- """
- Gather auguments and manage mapping of vols to hosts.
- """
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- host=dict(required=False, default=""),
- cluster=dict(required=False, default=""),
+ host=dict(required=False, default=None),
+ cluster=dict(required=False, default=None),
state=dict(default='present', choices=['stat', 'present', 'absent']),
volume=dict(required=True),
- lun=dict(type=int),
+ lun=dict(type="int"),
)
)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py
new file mode 100644
index 000000000..15374a52e
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_metadata.py
@@ -0,0 +1,674 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies metadata on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_metadata
+version_added: 2.13.0
+short_description: Create, Delete or Modify metadata on Infinibox
+description:
+ - This module creates, deletes or modifies metadata on Infinibox.
+ - Deleting metadata by object, without specifying a key, is not implemented for any object_type (e.g. DELETE api/rest/metadata/system).
+  - This would delete all metadata belonging to the object. Instead delete each key explicitly using its key name.
+author: David Ohlemacher (@ohlemacher)
+options:
+ object_type:
+ description:
+ - Type of object
+ type: str
+ required: true
+ choices: ["cluster", "fs", "fs-snap", "host", "pool", "system", "vol", "vol-snap"]
+ object_name:
+ description:
+ - Name of the object. Not used if object_type is system
+ type: str
+ required: false
+ key:
+ description:
+ - Name of the metadata key
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the metadata key
+ type: str
+ required: false
+ state:
+ description:
+ - Creates/Modifies metadata when present or removes when absent.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create new metadata key foo with value bar
+  infini_metadata:
+    object_type: system
+    key: foo
+    value: bar
+    state: present
+    user: admin
+    password: secret
+    system: ibox001
+- name: Stat metadata key named foo
+  infini_metadata:
+    object_type: system
+    key: foo
+    state: stat
+    user: admin
+    password: secret
+    system: ibox001
+- name: Remove metadata key named foo
+  infini_metadata:
+    object_type: system
+    key: foo
+    state: absent
+    user: admin
+    password: secret
+    system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ get_cluster,
+ get_filesystem,
+ get_host,
+ get_pool,
+ get_system,
+ get_volume,
+ infinibox_argument_spec,
+)
+
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ HAS_INFINISDK = False
+
+HAS_CAPACITY = False
+
+
+@api_wrapper
+def get_metadata_vol(module, disable_fail):
+ """ Get metadata about a volume """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ vol = get_volume(module, system)
+ if vol:
+ path = f"metadata/{vol.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Volume {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Volume with object name {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_fs(module, disable_fail):
+ """ Get metadata about a fs """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ fs = get_filesystem(module, system)
+ if fs:
+ path = f"metadata/{fs.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"File system {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"File system named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_host(module, disable_fail):
+ """ Get metadata about a host """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ host = get_host(module, system)
+ if host:
+ path = f"metadata/{host.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Host {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Host named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_cluster(module, disable_fail):
+ """ Get metadata about a cluster """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ cluster = get_cluster(module, system)
+ if cluster:
+ path = f"metadata/{cluster.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Cluster {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Cluster named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_fssnap(module, disable_fail):
+ """ Get metadata about a fs snapshot """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ fssnap = get_filesystem(module, system)
+ if fssnap:
+ path = f"metadata/{fssnap.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"File system snapshot {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"File system snapshot named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_pool(module, disable_fail):
+ """ Get metadata about a pool """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ pool = get_pool(module, system)
+ if pool:
+ path = f"metadata/{pool.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Pool {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Pool named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata_volsnap(module, disable_fail):
+ """ Get metadata for a volume snapshot """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+ metadata = None
+
+ volsnap = get_volume(module, system)
+ if volsnap:
+ path = f"metadata/{volsnap.id}/{key}"
+ try:
+ metadata = system.api.get(path=path)
+ except APICommandFailed:
+ if not disable_fail:
+ module.fail_json(
+ f"Cannot find {object_type} metadata key. "
+ f"Volume snapshot {object_name} key {key} not found"
+ )
+ elif not disable_fail:
+ msg = f"Volume snapshot named {object_name} not found. Cannot stat its metadata."
+ module.fail_json(msg=msg)
+
+ return metadata
+
+
+@api_wrapper
+def get_metadata(module, disable_fail=False):
+ """
+ Find and return metadata
+ Use disable_fail when we are looking for metadata
+ and it may or may not exist and neither case is an error.
+ """
+ system = get_system(module)
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+ key = module.params["key"]
+
+ if object_type == "system":
+ path = f"metadata/{object_type}?key={key}"
+ metadata = system.api.get(path=path)
+ elif object_type == "fs":
+ metadata = get_metadata_fs(module, disable_fail)
+ elif object_type == "vol":
+ metadata = get_metadata_vol(module, disable_fail)
+ elif object_type == "host":
+ metadata = get_metadata_host(module, disable_fail)
+ elif object_type == "cluster":
+ metadata = get_metadata_cluster(module, disable_fail)
+ elif object_type == "fs-snap":
+        metadata = get_metadata_fssnap(module, disable_fail)
+ elif object_type == "pool":
+ metadata = get_metadata_pool(module, disable_fail)
+ elif object_type == "vol-snap":
+ metadata = get_metadata_volsnap(module, disable_fail)
+
+ else:
+ msg = f"Metadata for {object_type} not supported. Cannot stat."
+ module.fail_json(msg=msg)
+
+ if metadata:
+ result = metadata.get_result()
+ if not disable_fail and not result:
+ msg = f"Metadata for {object_type} with key {key} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ return result
+
+ if disable_fail:
+ return None
+
+ msg = f"Metadata for {object_type} named {object_name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ return None # Quiet pylint
+
+
+@api_wrapper
+def put_metadata(module): # pylint: disable=too-many-statements
+ """Create metadata key with a value. The changed variable is found elsewhere."""
+ system = get_system(module)
+
+ object_type = module.params["object_type"]
+ key = module.params["key"]
+ value = module.params["value"]
+
+ # Could check metadata value size < 32k
+
+ if object_type == "system":
+ path = "metadata/system"
+ elif object_type == "vol":
+ vol = get_volume(module, system)
+ if not vol:
+ object_name = module.params["object_name"]
+ msg = f"Volume {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{vol.id}"
+ elif object_type == "fs":
+ fs = get_filesystem(module, system)
+ if not fs:
+ object_name = module.params["object_name"]
+ msg = f"File system {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{fs.id}"
+ elif object_type == "host":
+ host = get_host(module, system)
+ if not host:
+ object_name = module.params["object_name"]
+            msg = f"Host {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{host.id}"
+ elif object_type == "cluster":
+ cluster = get_cluster(module, system)
+ if not cluster:
+ object_name = module.params["object_name"]
+ msg = f"Cluster {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{cluster.id}"
+ elif object_type == "fs-snap":
+ fssnap = get_filesystem(module, system)
+ if not fssnap:
+ object_name = module.params["object_name"]
+ msg = f"File system snapshot {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{fssnap.id}"
+ elif object_type == "pool":
+ pool = get_pool(module, system)
+ if not pool:
+ object_name = module.params["object_name"]
+ msg = f"Pool {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{pool.id}"
+ elif object_type == "vol-snap":
+ volsnap = get_volume(module, system)
+ if not volsnap:
+ object_name = module.params["object_name"]
+ msg = f"Volume snapshot {object_name} not found. Cannot add metadata key {key}."
+ module.fail_json(msg=msg)
+ path = f"metadata/{volsnap.id}"
+
+ # Create json data
+ data = {
+ key: value
+ }
+
+ # Put
+ system.api.put(path=path, data=data)
+ # Variable 'changed' not returned by design
+
+
+@api_wrapper
+def delete_metadata(module): # pylint: disable=too-many-return-statements
+ """
+ Remove metadata key.
+ Not implemented by design: Deleting all of the system's metadata
+ using 'DELETE api/rest/metadata/system'.
+ """
+ system = get_system(module)
+ changed = False
+ object_type = module.params["object_type"]
+ key = module.params["key"]
+ if object_type == "system":
+ path = f"metadata/system/{key}"
+ elif object_type == "vol":
+ vol = get_volume(module, system)
+ if not vol:
+ changed = False
+ return changed # No vol therefore no metadata to delete
+ path = f"metadata/{vol.id}/{key}"
+ elif object_type == "fs":
+ fs = get_filesystem(module, system)
+ if not fs:
+ changed = False
+ return changed # No fs therefore no metadata to delete
+ path = f"metadata/{fs.id}/{key}"
+ elif object_type == "host":
+ host = get_host(module, system)
+ if not host:
+ changed = False
+ return changed # No host therefore no metadata to delete
+ path = f"metadata/{host.id}/{key}"
+ elif object_type == "cluster":
+ cluster = get_cluster(module, system)
+ if not cluster:
+ changed = False
+ return changed # No cluster therefore no metadata to delete
+ path = f"metadata/{cluster.id}/{key}"
+ elif object_type == "fs-snap":
+ fssnap = get_filesystem(module, system)
+ if not fssnap:
+ changed = False
+ return changed # No fssnap therefore no metadata to delete
+ path = f"metadata/{fssnap.id}/{key}"
+ elif object_type == "pool":
+ pool = get_pool(module, system)
+ if not pool:
+ changed = False
+ return changed # No pool therefore no metadata to delete
+ path = f"metadata/{pool.id}/{key}"
+ elif object_type == "vol-snap":
+ volsnap = get_volume(module, system)
+ if not volsnap:
+ changed = False
+ return changed # No volsnap therefore no metadata to delete
+ path = f"metadata/{volsnap.id}/{key}"
+ else:
+ module.fail_json(f"Object type {object_type} not supported")
+
+ try:
+ system.api.delete(path=path)
+ changed = True
+ except APICommandFailed as err:
+ if err.status_code != 404:
+ raise
+ return changed
+
+
+def handle_stat(module):
+ """Return metadata stat"""
+ object_type = module.params["object_type"]
+ key = module.params["key"]
+ metadata = get_metadata(module)
+ if object_type == "system":
+ metadata_id = metadata[0]["id"]
+ object_id = metadata[0]["object_id"]
+ value = metadata[0]["value"]
+ else:
+ metadata_id = metadata["id"]
+ object_id = metadata["object_id"]
+ value = metadata["value"]
+
+ result = {
+ "msg": "Metadata found",
+ "changed": False,
+ "object_type": object_type,
+ "key": key,
+ "id": metadata_id,
+ "object_id": object_id,
+ "value": value,
+ }
+ module.exit_json(**result)
+
+
+def handle_present(module):
+ """Make metadata present"""
+ changed = False
+ msg = "Metadata unchanged"
+ if not module.check_mode:
+ old_metadata = get_metadata(module, disable_fail=True)
+ put_metadata(module)
+ new_metadata = get_metadata(module)
+ changed = new_metadata != old_metadata
+ if changed:
+ msg = "Metadata changed"
+ else:
+ msg = "Metadata unchanged since the value is the same as the existing metadata"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ """Make metadata absent"""
+ msg = "Metadata unchanged"
+ changed = False
+ if not module.check_mode:
+ changed = delete_metadata(module)
+ if changed:
+ msg = "Metadata removed"
+ else:
+ msg = "Metadata did not exist so no removal was necessary"
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """Determine which state function to execute and do so"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ state = module.params["state"]
+ object_type = module.params["object_type"]
+ object_name = module.params["object_name"]
+
+ # Check object_type
+ object_types = [
+ "cluster",
+ "fs",
+ "fs-snap",
+ "host",
+ "pool",
+ "system",
+ "vol",
+ "vol-snap",
+ ]
+ if object_type not in object_types:
+ module.fail_json(
+ f"Cannot create {object_type} metadata. Object type must be one of {object_types}"
+ )
+
+ # Check object_name
+ if object_type == "system":
+ if object_name:
+ module.fail_json("An object_name for object_type system must not be provided.")
+ else:
+ if not object_name:
+ module.fail_json(
+ f"The name of the {object_type} must be provided as object_name."
+ )
+
+ key = module.params["key"]
+ if not key:
+ module.fail_json(f"Cannot create a {object_type} metadata key without providing a key name")
+
+ if state == "stat":
+ pass
+ elif state == "present":
+ # Check value
+ key = module.params["key"]
+ value = module.params["value"]
+ if not value:
+ module.fail_json(
+ f"Cannot create a {object_type} metadata key {key} without providing a value"
+ )
+ # Check system object_type
+ if object_type == "system":
+ if key == "ui-dataset-default-provisioning":
+ values = ["THICK", "THIN"]
+ if value not in values:
+ module.fail_json(
+ f"Cannot create {object_type} metadata for key {key}. "
+ f"Value must be one of {values}. Invalid value: {value}."
+ )
+
+ # Convert bool string to bool
+ if key in [
+ "ui-dataset-base2-units",
+ "ui-feedback-dialog",
+ "ui-feedback-form",
+ ]:
+ try:
+ module.params["value"] = json.loads(value.lower())
+ except json.decoder.JSONDecodeError:
+ module.fail_json(
+ f"Cannot create {object_type} metadata for key {key}. "
+ f"Value must be able to be decoded as a boolean. Invalid value: {value}."
+ )
+
+ # Convert integer string to int
+ if key in [
+ "ui-bulk-volume-zero-padding",
+ "ui-table-export-limit"
+ ]:
+ try:
+ module.params["value"] = json.loads(value.lower())
+ except json.decoder.JSONDecodeError:
+ module.fail_json(
+ f"Cannot create {object_type} metadata for key {key}. "
+ f"Value must be of type integer. Invalid value: {value}."
+ )
+
+ elif state == "absent":
+ pass
+ else:
+ module.fail_json(f"Invalid state '{state}' provided")
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "object_type": {"required": True, "choices": ["cluster", "fs", "fs-snap", "host", "pool", "system", "vol", "vol-snap"]},
+ "object_name": {"required": False, "default": None},
+ "key": {"required": True, "no_log": False},
+ "value": {"required": False, "default": None},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
index 013d86e5e..f9f02e11d 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_network_space.py
@@ -1,9 +1,13 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""This module creates, deletes or modifies network spaces on Infinibox."""
+
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
@@ -20,25 +24,35 @@ options:
name:
description:
- Network space name
+ type: str
required: true
state:
description:
- Creates/Modifies network spaces when present. Removes when absent. Shows status when stat.
+ type: str
required: false
default: present
choices: [ "stat", "present", "absent" ]
interfaces:
description:
- - A list of interfaces for the space.
+ - A list of interface IDs for the space.
required: false
type: list
- elements: str
+ elements: int
+ default: []
+ network_config:
+ description:
+ - A network description.
+ type: dict
+ default: {}
+ required: false
service:
description:
- Choose a service.
+ type: str
required: false
- default: "replication"
- choices: ["replication", "NAS", "iSCSI"]
+ default: "RMR_SERVICE"
+ choices: ["RMR_SERVICE", "NAS_SERVICE", "ISCSI_SERVICE"]
mtu:
description:
- Set an MTU. If not specified, defaults to 1500 bytes.
@@ -54,6 +68,11 @@ options:
- Network mask.
required: false
type: int
+ default_gateway:
+ description:
+ - Default gateway.
+ type: str
+ required: false
ips:
description:
- List of IPs.
@@ -69,7 +88,12 @@ options:
- It does not affect sync-replication or active-active traffic.
required: false
type: int
-
+ async_only:
+ description:
+ - Run asynchronously only.
+ required: false
+ type: bool
+ default: false
extends_documentation_fragment:
- infinibox
'''
@@ -80,20 +104,20 @@ EXAMPLES = r'''
name: iSCSI
state: present
interfaces:
- - 1680
- - 1679
- - 1678
+ - 1680
+ - 1679
+ - 1678
service: ISCSI_SERVICE
netmask: 19
network: 172.31.32.0
default_gateway: 172.31.63.254
ips:
- - 172.31.32.145
- - 172.31.32.146
- - 172.31.32.147
- - 172.31.32.148
- - 172.31.32.149
- - 172.31.32.150
+ - 172.31.32.145
+ - 172.31.32.146
+ - 172.31.32.147
+ - 172.31.32.148
+ - 172.31.32.149
+ - 172.31.32.150
user: admin
password: secret
system: ibox001
@@ -103,28 +127,24 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
infinibox_argument_spec,
get_system,
- unixMillisecondsToDate,
merge_two_dicts,
get_net_space,
)
try:
from infinisdk.core.exceptions import APICommandFailed
- from infinisdk.core.exceptions import ObjectNotFound
- from infi.dtypes.iqn import make_iscsi_name
except ImportError:
pass # Handled by HAS_INFINISDK from module_utils
@api_wrapper
def create_empty_network_space(module, system):
+ """ Create an empty network space """
# Create network space
network_space_name = module.params["name"]
service = module.params["service"]
@@ -137,9 +157,7 @@ def create_empty_network_space(module, system):
}
interfaces = module.params["interfaces"]
- # print("Creating network space {0}".format(network_space_name))
- product_id = system.api.get('system/product_id')
- # print("api: {0}".format(product_id.get_result()))
+ # product_id = system.api.get('system/product_id')
net_create_url = "network/spaces"
net_create_data = {
@@ -153,11 +171,13 @@ def create_empty_network_space(module, system):
if mtu:
net_create_data["mtu"] = mtu
- net_create = system.api.post(
- path=net_create_url,
- data=net_create_data
- )
- # print("net_create: {0}".format(net_create))
+ try:
+ system.api.post(
+ path=net_create_url,
+ data=net_create_data
+ )
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot create empty network space {network_space_name}: {err}")
@api_wrapper
@@ -166,36 +186,33 @@ def find_network_space_id(module, system):
Find the ID of this network space
"""
network_space_name = module.params["name"]
- net_id_url = "network/spaces?name={0}&fields=id".format(network_space_name)
+ net_id_url = f"network/spaces?name={network_space_name}&fields=id"
net_id = system.api.get(
path=net_id_url
)
result = net_id.get_json()['result'][0]
space_id = result['id']
- # print("Network space has ID {0}".format(space_id))
return space_id
@api_wrapper
def add_ips_to_network_space(module, system, space_id):
+ """ Add IPs to space. Ignore address conflict errors. """
network_space_name = module.params["name"]
- # print("Adding IPs to network space {0}".format(network_space_name))
-
ips = module.params["ips"]
for ip in ips:
- ip_url = "network/spaces/{0}/ips".format(space_id)
+ ip_url = f"network/spaces/{space_id}/ips"
ip_data = ip
- ip_add = system.api.post(
- path=ip_url,
- data=ip_data
- )
- # print("add_ips json: {0}".format(ip_add.get_json()))
- result = ip_add.get_json()['result']
- # print("add ip result: {0}".format(result))
+ try:
+ system.api.post(path=ip_url, data=ip_data)
+ except APICommandFailed as err:
+ if err.error_code != "NET_SPACE_ADDRESS_CONFLICT": # Ignore
+ module.fail_json(msg=f"Cannot add IP {ip} to network space {network_space_name}: {err}")
@api_wrapper
def create_network_space(module, system):
+ """ Create a network space """
if not module.check_mode:
# Create space
create_empty_network_space(module, system)
@@ -214,54 +231,43 @@ def create_network_space(module, system):
def update_network_space(module, system):
"""
Update network space.
- TODO - This is incomplete and will not update the space.
- It will instead return changed=False and a message.
- To implement this we will need to find the existing space.
- For each field that we support updating, we need to compare existing
- to new values and if different update. We will need to iterate
- over the settings or we will receive:
- Status: 400
- Code: NOT_SUPPORTED_MULTIPLE_UPDATE
+ Update fields individually. If grouped the API will generate
+ a NOT_SUPPORTED_MULTIPLE_UPDATE error.
"""
- changed = False
- msg = "Update is not supported yet"
- module.exit_json(changed=changed, msg=msg)
-
- # TODO Everything below is incomplete
- # Update network space
- network_space_name = module.params["name"]
- service = module.params["service"]
- network_config = {
- "netmask": module.params["netmask"],
- "network": module.params["network"],
- "default_gateway": module.params["default_gateway"],
- }
- interfaces = module.params["interfaces"]
-
- # print("Updating network space {0}".format(network_space_name))
-
- # Find space's ID
space_id = find_network_space_id(module, system)
-
- net_url = "network/spaces/{0}".format(space_id)
- net_data = {
- "name": network_space_name,
- "service": service,
- "network_config": network_config,
- "interfaces": interfaces,
- }
-
- # Find existing space
- net_existing = system.api.get(path=net_url)
-
- net_update = system.api.put(
- path=net_url,
- data=net_data
- )
- # print("net_update: {0}".format(net_update))
+ datas = [
+ {"interfaces": module.params["interfaces"]},
+ {"mtu": module.params["mtu"]},
+ {"network_config":
+ {
+ "default_gateway": module.params["default_gateway"],
+ "netmask": module.params["netmask"],
+ "network": module.params["network"],
+ }
+ },
+ {"rate_limit": module.params["rate_limit"]},
+ {"properties":
+ {
+ "is_async_only": module.params["async_only"],
+ }
+ },
+ ]
+ for data in datas:
+ try:
+ system.api.put(
+ path=f"network/spaces/{space_id}",
+ data=data
+ )
+ except APICommandFailed as err:
+ msg = f"Cannot update network space: {err}"
+ module.fail_json(msg=msg)
+ add_ips_to_network_space(module, system, space_id)
+ changed = True
+ return changed
-def get_network_space_fields(module, network_space):
+def get_network_space_fields(network_space):
+ """ Get the network space fields and return as a dict """
fields = network_space.get_fields(from_cache=True, raw_value=True)
field_dict = dict(
@@ -282,16 +288,18 @@ def get_network_space_fields(module, network_space):
def handle_stat(module):
+ """ Return details about the space """
network_space_name = module.params["name"]
system = get_system(module)
net_space = get_net_space(module, system)
if not net_space:
- module.fail_json(msg="Network space {0} not found".format(network_space_name))
+ module.fail_json(msg=f"Network space {network_space_name} not found")
- field_dict = get_network_space_fields(module, net_space)
+ field_dict = get_network_space_fields(net_space)
result = dict(
- changed=False, msg="Network space {0} stat found".format(network_space_name)
+ changed=False,
+ msg=f"Network space {network_space_name} stat found"
)
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
@@ -305,14 +313,41 @@ def handle_present(module):
system = get_system(module)
net_space = get_net_space(module, system)
if net_space:
- changed = update_network_space(module, net_space)
- msg = "Host {0} updated".format(network_space_name)
+ changed = update_network_space(module, system)
+ msg = f"Network space named {network_space_name} updated"
else:
changed = create_network_space(module, system)
- msg = "Network space {0} created".format(network_space_name)
+ msg = f"Network space named {network_space_name} created"
module.exit_json(changed=changed, msg=msg)
+def disable_and_delete_ip(module, network_space, ip):
+ """
+ Disable and delete a network space IP
+ """
+ if not ip:
+ return # Nothing to do
+ addr = ip['ip_address']
+ network_space_name = module.params["name"]
+ ip_type = ip['type']
+ mgmt = ""
+ if ip_type == "MANAGEMENT":
+ mgmt = "management " # Trailing space by design
+
+ try:
+ try:
+ network_space.disable_ip_address(addr)
+ except APICommandFailed as err:
+ if err.error_code == "IP_ADDRESS_ALREADY_DISABLED":
+ pass
+ else:
+ module.fail_json(msg=f"Disabling of network space {network_space_name} IP {mgmt}{addr} API command failed")
+
+ network_space.remove_ip_address(addr)
+ except Exception as err: # pylint: disable=broad-exception-caught
+ module.fail_json(msg=f"Disabling or removal of network space {network_space_name} IP {mgmt}{addr} failed: {err}")
+
+
def handle_absent(module):
"""
Remove a namespace. First, may disable and remove the namespace's IPs.
@@ -322,55 +357,34 @@ def handle_absent(module):
network_space = get_net_space(module, system)
if not network_space:
changed = False
- msg = "Network space {0} already absent".format(network_space_name)
+ msg = f"Network space {network_space_name} already absent"
else:
# Find IPs from space
ips = list(network_space.get_ips())
# Disable and delete IPs from space
if not module.check_mode:
+ management_ip = None # Must be disabled and deleted last
for ip in ips:
- addr = ip["ip_address"]
-
- # print("Disabling IP {0}".format(addr))
- try:
- network_space.disable_ip_address(addr)
- except APICommandFailed as err:
- if err.error_code == "IP_ADDRESS_ALREADY_DISABLED":
- pass
- # print("Already disabled IP {0}".format(addr))
- else:
- # print("Failed to disable IP {0}".format(addr))
- module.fail_json(
- msg="Disabling of network space {0} IP {1} failed".format(
- network_space_name, addr
- )
- )
-
- # print("Removing IP {0}".format(addr))
- try:
- network_space.remove_ip_address(addr)
- except Exception as err:
- module.fail_json(
- msg="Removal of network space {0} IP {1} failed: {2}".format(
- network_space_name, addr, err
- )
- )
+ if ip['type'] == 'MANAGEMENT':
+ management_ip = ip
+ continue
+ disable_and_delete_ip(module, network_space, ip)
+ disable_and_delete_ip(module, network_space, management_ip)
# Delete space
network_space.delete()
changed = True
- msg = "Network space {0} removed".format(network_space_name)
+ msg = f"Network space {network_space_name} removed"
else:
changed = False
- msg = "Network space {0} not altered due to checkmode".format(
- network_space_name
- )
+ msg = f"Network space {network_space_name} not altered due to checkmode"
module.exit_json(changed=changed, msg=msg)
def execute_state(module):
+ """ Execute a state """
state = module.params["state"]
try:
if state == "stat":
@@ -381,7 +395,7 @@ def execute_state(module):
handle_absent(module)
else:
module.fail_json(
- msg="Internal handler error. Invalid state: {0}".format(state)
+ msg=f"Internal handler error. Invalid state: {state}"
)
finally:
system = get_system(module)
@@ -389,6 +403,7 @@ def execute_state(module):
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
@@ -397,18 +412,19 @@ def main():
default="present", required=False, choices=["stat", "present", "absent"]
),
service=dict(
- default="replication",
+ default="RMR_SERVICE",
required=False,
- choices=["replication", "NAS_SERVICE", "ISCSI_SERVICE"],
+ choices=["RMR_SERVICE", "NAS_SERVICE", "ISCSI_SERVICE"],
),
- mtu=dict(default=None, required=False, type=int),
+ mtu=dict(default=None, required=False, type="int"),
network=dict(default=None, required=False),
- netmask=dict(default=None, required=False, type=int),
+ netmask=dict(default=None, required=False, type="int"),
default_gateway=dict(default=None, required=False),
interfaces=dict(default=list(), required=False, type="list", elements="int"),
- network_config=dict(default=dict(), required=False, type=dict),
+ network_config=dict(default=dict(), required=False, type="dict"),
ips=dict(default=list(), required=False, type="list", elements="str"),
- rate_limit=dict(default=None, required=False, type=int),
+ rate_limit=dict(default=None, required=False, type="int"),
+ async_only=dict(default=False, required=False, type="bool"),
)
)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py
new file mode 100644
index 000000000..1916cdb49
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_rule.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies metadata on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_notification_rule
+version_added: 2.13.0
+short_description: Config notification rules
+description:
+ - This module configures notification rules on an Infinibox
+author: Wei Wang (@wwang)
+options:
+ name:
+ description:
+ - Name of the rule
+ type: str
+ required: true
+ event_level:
+ description:
+ - Event levels
+ type: list
+ elements: str
+ required: false
+ default: []
+ include_events:
+ description:
+ - Included events
+ type: list
+ elements: str
+ required: false
+ default: []
+ exclude_events:
+ description:
+ - Excluded events
+ type: list
+ elements: str
+ required: false
+ default: []
+ recipients:
+ description:
+ - Email list of the recipients
+ - Recipients and target are exclusive to each other, i.e. only recipients or target
+ should be used, don't use both at the same time.
+ type: list
+ elements: str
+ required: false
+ default: []
+ target:
+ description:
+ - Notification target
+ - Recipients and target are exclusive to each other, i.e. only recipients or target
+ should be used, don't use both at the same time.
+ type: str
+ required: false
+ state:
+ description:
+ - Query or modifies config.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create a new notification rule to a target
+ infini_notification_rule:
+ name: "test-rule-to-target" # this need to be uniq
+ event_level:
+ - ERROR
+ - CRITICAL
+ include_events:
+ - ACTIVATION_PAUSED
+ exclude_events:
+ - ACTIVE_DIRECTORY_ALL_DOMAIN_CONTROLLERS_DOWN
+ - ACTIVE_DIRECTORY_LEFT
+ target: testgraylog1
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+"""
+
+# RETURN = r''' # '''
+
+# -*- coding: utf-8 -*-
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+HAS_ARROW = False
+
+try:
+ from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ )
+except ModuleNotFoundError:
+ from infinibox import ( # Used when hacking
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ )
+
+
+@api_wrapper
+def find_target_id(module, system):
+ """ Find the ID of the target by name """
+ target = module.params["target"]
+ path = f"notifications/targets?name={target}&fields=id"
+ api_result = system.api.get(
+ path=path
+ )
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ target_id = result['id']
+ else:
+ target_id = None
+ return target_id
+
+
+@api_wrapper
+def get_rules(module):
+ """ Get all rules """
+ system = get_system(module)
+ path = "notifications/rules"
+ rules = system.api.get(path=path)
+ return rules
+
+
+@api_wrapper
+def find_rule_id(module, system):
+ """ Find the ID of the rule by name """
+ rule_name = module.params["name"]
+ path = f"notifications/rules?name={rule_name}&fields=id"
+ api_result = system.api.get(
+ path=path
+ )
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ rule_id = result['id']
+ else:
+ rule_id = None
+ return rule_id
+
+
+@api_wrapper
+def delete_rule(module):
+ """ Delete a notification rule """
+ system = get_system(module)
+ rule_id = find_rule_id(module, system)
+ path = f"notifications/rules/{rule_id}?approved=true"
+ system.api.delete(path=path)
+
+
+@api_wrapper
+def create_rule(module):
+ """ Create a new notifition rule """
+ system = get_system(module)
+ name = module.params["name"]
+ event_level = module.params["event_level"]
+ include_events = module.params["include_events"]
+ exclude_events = module.params["exclude_events"]
+ recipients = module.params["recipients"]
+ target = module.params["target"]
+ path = "notifications/rules"
+
+ json_data = {
+ "name": name,
+ "event_level": event_level,
+ "include_events": include_events,
+ "exclude_events": exclude_events,
+ }
+
+ if recipients:
+ target_parameters = {
+ "recipients": recipients
+ }
+ target_id = 3 # Target ID for sending to recipients
+ json_data["target_parameters"] = target_parameters
+ elif target:
+ target_id = find_target_id(module, system)
+ else:
+ msg = "Neither recipients nor target parameters specified"
+ module.fail_json(msg=msg)
+
+ json_data["target_id"] = target_id
+
+ system.api.post(path=path, data=json_data)
+
+
+@api_wrapper
+def update_rule(module):
+ """
+ Update an existing rule.
+ """
+ system = get_system(module)
+ name = module.params["name"]
+ event_level = module.params["event_level"]
+ include_events = module.params["include_events"]
+ exclude_events = module.params["exclude_events"]
+ recipients = module.params["recipients"]
+ target = module.params["target"]
+
+ json_data = {
+ "name": name,
+ "event_level": event_level,
+ "include_events": include_events,
+ "exclude_events": exclude_events,
+ }
+
+ if recipients:
+ target_parameters = {
+ "recipients": recipients
+ }
+ target_id = 3 # Target ID for sending to recipients
+ json_data["target_parameters"] = target_parameters
+ elif target:
+ target_id = find_target_id(module, system)
+ else:
+ msg = "Neither recipients nor target parameters specified"
+ module.fail_json(msg=msg)
+
+ json_data["target_id"] = target_id
+ rule_id = find_rule_id(module, system)
+ path = f"notifications/rules/{rule_id}"
+ system.api.put(path=path, data=json_data)
+
+
+def handle_present(module):
+ """ Create or update a rule """
+ system = get_system(module)
+ name = module.params["name"]
+ changed = False
+ if not module.check_mode:
+ rule_id = find_rule_id(module, system)
+ if not rule_id:
+ create_rule(module)
+ changed = True
+ msg = f"Rule named {name} created"
+ else:
+ update_rule(module)
+ msg = f"Rule named {name} updated"
+ changed = True
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_stat(module):
+ """ Return rule stat """
+ result = None
+ system = get_system(module)
+ name = module.params['name']
+ rule_id = find_rule_id(module, system)
+ if rule_id:
+ path = f"notifications/rules/{rule_id}"
+ api_result = system.api.get(path=path)
+ result = api_result.get_json()['result']
+ result["rule_id"] = result.pop("id") # Rename id to rule_id
+ result["msg"] = f"Stat for notification rule named {name}"
+ result["changed"] = False
+ module.exit_json(**result)
+ msg = f"Notification rule {name} not found"
+ module.fail_json(msg=msg)
+
+
+def handle_absent(module):
+ """ Remove rule """
+ changed = False
+ name = module.params["name"]
+ system = get_system(module)
+
+ rule_id = find_rule_id(module, system)
+ if not rule_id:
+ msg = f"Rule named {name} not found. Deletion not required."
+ changed = False
+ else:
+ msg = f"Rule named {name} has been deleted"
+ changed = True
+ if not module.check_mode:
+ delete_rule(module)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """Determine which state function to execute and do so"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ recipients = module.params['recipients']
+ target = module.params['target']
+ if recipients and target:
+ msg = "Cannot specify both recipients and target parameters"
+ module.fail_json(msg=msg)
+ if recipients:
+ for recipient in recipients:
+ if len(recipient) == 1:
+ msg = f"{recipient} is an invalid email address. Recipients '{recipients}' must be provided as a list, e.g. '[ \"user@example.com\" ]'"
+ module.fail_json(msg=msg)
+ if '@' not in recipient:
+ msg = f"{recipient} is an invalid email address"
+ module.fail_json(msg=msg)
+
+
+def main():
+ """Main module function"""
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "name": {"required": True},
+ "event_level": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "include_events": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "exclude_events": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "recipients": {"required": False, "default": [], "type": "list", "elements": "str"},
+ "target": {"required": False, "type": "str", "default": None},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py
new file mode 100644
index 000000000..a9e707289
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_notification_target.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies metadata on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_notification_target
+version_added: 2.13.0
+short_description: Config notification target
+description:
+ - This module configures syslog notification targets on an Infinibox
+author: Wei Wang (@wwang)
+options:
+ name:
+ description:
+ - Name of the syslog target
+ type: str
+ required: true
+ host:
+ description:
+ - Host name or IP address of the target
+ type: str
+ required: false
+ port:
+ description:
+ - Port of the target
+ type: int
+ required: false
+ default: 514
+ transport:
+ description:
+ - TCP or UDP
+ type: str
+ required: false
+ choices:
+ - UDP
+ - TCP
+ default: UDP
+ protocol:
+ description:
+ - Protocol used for this target. Currently, the only valid value is SYSLOG.
+ type: str
+ required: false
+ choices:
+ - SYSLOG
+ default: SYSLOG
+ facility:
+ description:
+ - Facility
+ choices:
+ - LOCAL0
+ - LOCAL1
+ - LOCAL2
+ - LOCAL3
+ - LOCAL4
+ - LOCAL5
+ - LOCAL6
+ - LOCAL7
+ type: str
+ required: false
+ default: LOCAL7
+ visibility:
+ description:
+ - Visibility
+ type: str
+ choices:
+ - CUSTOMER
+ - INFINIDAT
+ required: false
+ default: CUSTOMER
+ post_test:
+ description:
+ - Run a test after new target is created
+ type: bool
+ required: false
+ default: true
+ state:
+ description:
+ - Query or modifies target
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create notification targets
+ infini_notification_target:
+ state: present
+ name: testgraylog1
+ protocol: SYSLOG
+ host: 172.31.77.214
+ port: 8067
+ facility: LOCAL7
+ transport: TCP
+ visibility: CUSTOMER
+ post_test: true
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+- name: Create a new notification rule to a target
+ infini_notification_rule:
+ name: "test-rule-to-target" # this need to be uniq
+ event_level:
+ - ERROR
+ - CRITICAL
+ include_events:
+ - ACTIVATION_PAUSED
+ exclude_events:
+ - ACTIVE_DIRECTORY_ALL_DOMAIN_CONTROLLERS_DOWN
+ - ACTIVE_DIRECTORY_LEFT
+ target: testgraylog1
+ state: "present"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ system: "{{ system }}"
+"""
+
+# RETURN = r''' # '''
+
+# -*- coding: utf-8 -*-
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ HAS_INFINISDK,
+ api_wrapper,
+ infinibox_argument_spec,
+ get_system,
+ merge_two_dicts,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def get_target(module):
+ """
+ Find and return the notification target matching the name parameter.
+ Fails the module if the API request fails or if no target
+ response is returned; otherwise returns the API result.
+ """
+ name = module.params['name']
+ path = f"notifications/targets?name={name}"
+ system = get_system(module)
+
+ try:
+ target = system.api.get(path=path)
+ except APICommandFailed as err:
+ msg = f"Cannot find notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+ if not target:
+ msg = f"Users repository {name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+ result = target.get_result()
+ return result
+
+
+def handle_stat(module):
+ """Return config stat"""
+ name = module.params['name']
+ try:
+ result = get_target(module)[0]
+ except IndexError:
+ module.fail_json(f"Cannot stat notification target {name}. Target not found.")
+ result2 = {
+ "changed": False,
+ "msg": f"Found notification target {name}",
+ }
+ result = merge_two_dicts(result, result2)
+ module.exit_json(**result)
+
+
+@api_wrapper
+def find_target_id(module, system):
+ """ Find the ID of the target by name """
+ target_name = module.params["name"]
+
+ try:
+ path = f"notifications/targets?name={target_name}&fields=id"
+ api_result = system.api.get(path=path)
+ except APICommandFailed as err:
+ msg = f"Cannot find ID for notification target {target_name}: {err}"
+ module.fail_json(msg=msg)
+
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ target_id = result['id']
+ else:
+ target_id = None
+ return target_id
+
+
+@api_wrapper
+def delete_target(module):
+ """ Delete a notification target """
+ system = get_system(module)
+ name = module.params["name"]
+ target_id = find_target_id(module, system)
+
+ try:
+ path = f"notifications/targets/{target_id}?approved=true"
+ system.api.delete(path=path)
+ except APICommandFailed as err:
+ msg = f"Cannot delete notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+
+@api_wrapper
+def create_target(module):
+ """ Create a new notifition target """
+ system = get_system(module)
+ name = module.params["name"]
+ protocol = module.params["protocol"]
+ host = module.params["host"]
+ port = module.params["port"]
+ facility = module.params["facility"]
+ transport = module.params["transport"]
+ post_test = module.params["post_test"]
+ visibility = module.params["visibility"]
+
+ path = "notifications/targets"
+
+ json_data = {
+ "name": name,
+ "protocol": protocol,
+ "host": host,
+ "port": port,
+ "facility": facility,
+ "transport": transport,
+ "visibility": visibility
+ }
+
+ try:
+ system.api.post(path=path, data=json_data)
+ except APICommandFailed as err:
+ msg = f"Cannot create notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+ if post_test:
+ target_id = find_target_id(module, system)
+ path = f"notifications/targets/{target_id}/test"
+ json_data = {}
+ try:
+ system.api.post(path=path, data=json_data)
+ except APICommandFailed as err:
+ msg = f"Cannot test notification target {name}: {err}"
+ module.fail_json(msg=msg)
+
+
+@api_wrapper
+def update_target(module):
+ """ Update an existing target. """
+ delete_target(module)
+ create_target(module)
+
+
+def handle_present(module):
+ """Make config present"""
+ system = get_system(module)
+ name = module.params["name"]
+ changed = False
+ if not module.check_mode:
+ target_id = find_target_id(module, system)
+ if not target_id:
+ create_target(module)
+ msg = f"Target {name} created"
+ else:
+ update_target(module)
+ msg = f"Target {name} deleted and recreated"
+ changed = True
+ module.exit_json(changed=changed, msg=msg)
+
+
+def handle_absent(module):
+ """Make config present"""
+ changed = False
+ name = module.params["name"]
+ system = get_system(module)
+ target_id = find_target_id(module, system)
+
+ if not target_id:
+ msg = f"Target {name} already does not exist"
+ changed = False
+ else:
+ msg = f"Target {name} has been deleted"
+ if not module.check_mode:
+ changed = True
+ delete_target(module)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+def execute_state(module):
+ """ Determine which state function to execute and do so """
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """ Verify module options are sane """
+ if module.params['protocol'] != "SYSLOG":
+ module.fail_json(msg="The only supported protocol is SYSLOG")
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+
+ argument_spec.update(
+ {
+ "name": {"required": True},
+ "host": {"required": False},
+ "port": {"required": False, "type": "int", "default": 514},
+ "transport": {"required": False, "default": "UDP", "choices": ["UDP", "TCP"]},
+ "protocol": {"required": False, "default": "SYSLOG", "choices": ["SYSLOG"]},
+ "facility": {"required": False, "default": "LOCAL7", "choices": ["LOCAL0", "LOCAL1", "LOCAL2", "LOCAL3", "LOCAL4", "LOCAL5", "LOCAL6", "LOCAL7"]},
+ "visibility": {"required": False, "default": "CUSTOMER", "choices": ["CUSTOMER", "INFINIDAT"]},
+ "post_test": {"required": False, "default": True, "type": "bool"},
+ "state": {"default": "present", "choices": ["stat", "present", "absent"]},
+ }
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ if not HAS_INFINISDK:
+ module.fail_json(msg=missing_required_lib("infinisdk"))
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
index d02657a19..43daa71be 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_pool.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+"""This module creates, deletes or modifies pools on Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -77,7 +81,7 @@ EXAMPLES = r'''
- name: Disable SSD Cache on pool
infini_pool:
name: foo
- ssd_cache: no
+ ssd_cache: false
user: admin
password: secret
system: ibox001
@@ -85,7 +89,7 @@ EXAMPLES = r'''
- name: Disable Compression on pool
infini_pool:
name: foo
- compression: no
+ compression: false
user: admin
password: secret
system: ibox001
@@ -95,7 +99,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -114,7 +117,7 @@ except ImportError:
@api_wrapper
def create_pool(module, system):
- """Create Pool"""
+ """ Create Pool """
name = module.params['name']
size = module.params['size']
vsize = module.params['vsize']
@@ -141,8 +144,8 @@ def create_pool(module, system):
@api_wrapper
-def update_pool(module, system, pool):
- """Update Pool"""
+def update_pool(module, pool):
+ """ Update Pool """
changed = False
size = module.params['size']
@@ -184,23 +187,21 @@ def update_pool(module, system, pool):
@api_wrapper
def delete_pool(module, pool):
- """Delete Pool"""
+ """ Delete Pool """
if not module.check_mode:
pool.delete()
msg = 'Pool deleted'
module.exit_json(changed=True, msg=msg)
-def get_sys_pool(module):
+def handle_stat(module):
+ """ Show details about a pool """
system = get_system(module)
pool = get_pool(module, system)
- return (system, pool)
-
-def handle_stat(module):
- system, pool = get_sys_pool(module)
+ name = module.params['name']
if not pool:
- module.fail_json(msg='Pool {0} not found'.format(module.params['name']))
+ module.fail_json(msg=f'Pool {name} not found')
fields = pool.get_fields()
# print('fields: {0}'.format(fields))
free_physical_capacity = fields.get('free_physical_capacity', None)
@@ -216,17 +217,21 @@ def handle_stat(module):
def handle_present(module):
- system, pool = get_sys_pool(module)
+ """ Create pool """
+ system = get_system(module)
+ pool = get_pool(module, system)
if not pool:
create_pool(module, system)
module.exit_json(changed=True, msg="Pool created")
else:
- changed = update_pool(module, system, pool)
+ changed = update_pool(module, pool)
module.exit_json(changed=changed, msg="Pool updated")
def handle_absent(module):
- system, pool = get_sys_pool(module)
+ """ Remove pool """
+ system = get_system(module)
+ pool = get_pool(module, system)
if not pool:
module.exit_json(changed=False, msg="Pool already absent")
else:
@@ -235,6 +240,7 @@ def handle_absent(module):
def execute_state(module):
+ """Determine which state function to execute and do so"""
state = module.params['state']
try:
if state == 'stat':
@@ -244,13 +250,14 @@ def execute_state(module):
elif state == 'absent':
handle_absent(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
@@ -274,13 +281,13 @@ def main():
if module.params['size']:
try:
Capacity(module.params['size'])
- except Exception:
+ except Exception: # pylint: disable=broad-exception-caught
module.fail_json(msg='size (Physical Capacity) should be defined in MB, GB, TB or PB units')
if module.params['vsize']:
try:
Capacity(module.params['vsize'])
- except Exception:
+ except Exception: # pylint: disable=broad-exception-caught
module.fail_json(msg='vsize (Virtual Capacity) should be defined in MB, GB, TB or PB units')
execute_state(module)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
index 303127260..6502045a9 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_port.py
@@ -1,7 +1,11 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position,multiple-statements
+
+"""This module manages ports on an Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
@@ -11,7 +15,7 @@ __metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_port
-version_added: '2.9.0'
+version_added: 2.9.0
short_description: Add and Delete fiber channel and iSCSI ports to a host on Infinibox
description:
- This module adds or deletes fiber channel or iSCSI ports to hosts on
@@ -21,6 +25,7 @@ options:
host:
description:
- Host Name
+ type: str
required: true
state:
description:
@@ -57,7 +62,7 @@ EXAMPLES = r'''
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
iqns:
- - "iqn.yyyy-mm.reverse-domain:unique-string"
+ - "iqn.yyyy-mm.reverse-domain:unique-string"
system: ibox01
user: admin
password: secret
@@ -67,8 +72,6 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
@@ -135,6 +138,7 @@ def delete_ports(module, system):
def get_sys_host(module):
+ """ Get parameters """
system = get_system(module)
host = get_host(module, system)
return (system, host)
@@ -168,12 +172,12 @@ def find_host_initiators_data(module, system, host, initiator_type):
Only include desired initiator keys for each initiator.
Return the filtered and edited host initiator list.
"""
- request = "initiators?page=1&page_size=1000&host_id={0}".format(host.id)
+ request = f"initiators?page=1&page_size=1000&host_id={host.id}"
# print("\nrequest:", request, "initiator_type:", initiator_type)
get_initiators_result = system.api.get(request, check_version=False)
result_code = get_initiators_result.status_code
if result_code != 200:
- msg = "get initiators REST call failed. code: {0}".format(result_code)
+ msg = f"get initiators REST call failed. code: {result_code}"
module.fail_json(msg=msg)
# Only return initiators of the desired type.
@@ -208,7 +212,7 @@ def find_host_initiators_data(module, system, host, initiator_type):
return host_initiators_by_type
-def get_port_fields(module, system, host):
+def get_port_fields(module, system, host): # pylint: disable=too-many-locals
"""
Return a dict with desired fields from FC and ISCSI ports associated with the host.
"""
@@ -297,13 +301,12 @@ def handle_stat(module):
Return json with status.
"""
system, host = get_sys_host(module)
-
host_name = module.params["host"]
if not host:
- module.fail_json(msg="Host {0} not found".format(host_name))
+ module.fail_json(msg=f"Host {host_name} not found")
field_dict = get_port_fields(module, system, host)
- result = dict(changed=False, msg="Host {0} ports found".format(host_name),)
+ result = dict(changed=False, msg=f"Host {host_name} ports found")
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
@@ -313,16 +316,15 @@ def handle_present(module):
Handle present state. Fail if host is None.
"""
system, host = get_sys_host(module)
-
host_name = module.params["host"]
if not host:
- module.fail_json(msg="Host {0} not found".format(host_name))
+ module.fail_json(msg=f"Host {host_name} not found")
changed = update_ports(module, system)
if changed:
- msg = "Mapping created for host {0}".format(host.get_name())
+ msg = f"Mapping created for host {host_name}"
else:
- msg = "No mapping changes were required for host {0}".format(host.get_name())
+ msg = f"No mapping changes were required for host {host_name}"
result = dict(changed=changed, msg=msg,)
module.exit_json(**result)
@@ -333,18 +335,17 @@ def handle_absent(module):
Handle absent state. Fail if host is None.
"""
system, host = get_sys_host(module)
+ host_name = module.params["host"]
if not host:
module.exit_json(
- changed=False, msg="Host {0} not found".format(module.params["host"])
+ changed=False, msg=f"Host {host_name} not found"
)
changed = delete_ports(module, system)
if changed:
- msg = "Mapping removed from host {0}".format(host.get_name())
+ msg = f"Mapping removed from host {host_name}"
else:
- msg = "No mapping changes were required. Mapping already removed from host {0}".format(
- host.get_name()
- )
+ msg = f"No mapping changes were required. Mapping already removed from host {host_name}"
result = dict(changed=changed, msg=msg,)
module.exit_json(**result)
@@ -364,26 +365,21 @@ def execute_state(module):
handle_absent(module)
else:
module.fail_json(
- msg="Internal handler error. Invalid state: {0}".format(state)
+ msg=f"Internal handler error. Invalid state: {state}"
)
finally:
system = get_system(module)
system.logout()
-def check_options(module):
- pass
-
-
def main():
"""
Gather auguments and manage mapping of vols to hosts.
"""
argument_spec = infinibox_argument_spec()
- null_list = list()
argument_spec.update(
dict(
- host=dict(required=True, type=str),
+ host=dict(required=True, type="str"),
state=dict(default="present", choices=["stat", "present", "absent"]),
wwns=dict(type="list", elements="str", default=list()),
iqns=dict(type="list", elements="str", default=list()),
@@ -395,7 +391,6 @@ def main():
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib("infinisdk"))
- check_options(module)
execute_state(module)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py
new file mode 100644
index 000000000..66219e08b
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_sso.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" Manage SSO """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_sso
+version_added: 2.16.0
+short_description: Configures or queries SSO on Infinibox
+description:
+ - This module configures (present state) or gets information about (absent state) SSO on Infinibox
+author: David Ohlemacher (@ohlemacher)
+options:
+ name:
+ description:
+ - Sets a name to reference the SSO by.
+ required: true
+ type: str
+ issuer:
+ description:
+ - URI of the SSO issuer.
+ required: false
+ type: str
+ sign_on_url:
+ description:
+ - URL for sign on.
+ type: str
+ required: false
+ signed_assertion:
+ description:
+ - Signed assertion
+ type: bool
+ required: false
+ default: false
+ signed_response:
+ description:
+ - Signed response
+ required: false
+ type: bool
+ default: false
+ signing_certificate:
+ description:
+ - Signing certificate content.
+ type: str
+ required: false
+ enabled:
+ description:
+ - Determines if the SSO is enabled.
+ required: false
+ default: true
+ type: bool
+ state:
+ description:
+ - Creates/Modifies the SSO, when using state present.
+ - For state absent, the SSO is removed.
+ - State stat shows the existing SSO's details.
+ type: str
+ required: false
+ default: present
+ choices: [ "stat", "present", "absent" ]
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Configure SSO
+ infini_sso:
+ name: OKTA
+ enabled: true
+ issuer: "http://www.okta.com/eykRra384o32rrTs"
+ sign_on_url: "https://infinidat.okta.com/app/infinidat_psus/exkra32oyyU6KCUCk2p7/sso/saml"
+ state: present
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Stat SSO
+ infini_sso:
+ name: OKTA
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Clear SSO configuration
+ infini_sso:
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ api_wrapper,
+ merge_two_dicts,
+ get_system,
+ infinibox_argument_spec,
+)
+
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ pass # Handled by HAS_INFINISDK from module_utils
+
+
+@api_wrapper
+def find_sso(module, name):
+ """ Find a SSO using its name """
+ path = f"config/sso/idps?name={name}"
+
+ try:
+ system = get_system(module)
+ sso_result = system.api.get(path=path).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot find SSO identity provider {name}: {err}"
+ module.fail_json(msg=msg)
+
+ return sso_result
+
+
+def handle_stat(module):
+ """ Handle the stat state """
+ name = module.params["name"]
+ sso_result = find_sso(module, name)
+ if not sso_result:
+ msg = f"SSO identity provider {name} not found. Cannot stat."
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=False,
+ msg=f"SSO identity provider {name} stat found"
+ )
+
+ result = merge_two_dicts(result, sso_result[0])
+ result['signing_certificate'] = "redacted"
+ module.exit_json(**result)
+
+
+def handle_present(module): # pylint: disable=too-many-locals
+ """ Handle the present state """
+ enabled = module.params['enabled']
+ issuer = module.params['issuer']
+ sign_on_url = module.params['sign_on_url']
+ signed_assertion = module.params['signed_assertion']
+ signed_response = module.params['signed_response']
+ signing_certificate = module.params['signing_certificate']
+ name = module.params['name']
+
+ existing_sso = find_sso(module, name)
+ if existing_sso:
+ existing_sso_id = existing_sso[0]['id']
+ delete_sso(module, existing_sso_id)
+
+ path = "config/sso/idps"
+ data = {
+ "enabled": enabled,
+ "issuer": issuer,
+ "name": name,
+ "sign_on_url": sign_on_url,
+ "signed_assertion": signed_assertion,
+ "signed_response": signed_response,
+ "signing_certificate": signing_certificate,
+ }
+
+ try:
+ system = get_system(module)
+ sso_result = system.api.post(path=path, data=data).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot configure SSO identity provider named {name}: {err}"
+ module.fail_json(msg=msg)
+
+ if not existing_sso:
+ msg = f"SSO identity provider named {name} successfully configured"
+ else:
+ msg = f"SSO identity provider named {name} successfully removed and recreated with updated parameters"
+ result = dict(
+ changed=True,
+ msg=msg,
+ )
+ result = merge_two_dicts(result, sso_result)
+ result['signing_certificate'] = "redacted"
+
+ module.exit_json(**result)
+
+
+def delete_sso(module, sso_id):
+ """ Delete a SSO. Reference its ID. """
+ path = f"config/sso/idps/{sso_id}"
+ name = module.params["name"]
+ try:
+ system = get_system(module)
+ sso_result = system.api.delete(path=path).get_result()
+ except APICommandFailed as err:
+ msg = f"Cannot delete SSO identity provider {name}: {err}"
+ module.fail_json(msg=msg)
+ return sso_result
+
+
+def handle_absent(module):
+ """ Handle the absent state """
+ name = module.params["name"]
+ found_sso = find_sso(module, name)
+ if not found_sso:
+ result = dict(
+ changed=False,
+ msg=f"SSO {name} already not found"
+ )
+ module.exit_json(**result)
+
+ sso_id = found_sso[0]['id']
+ sso_result = delete_sso(module, sso_id)
+
+ if not sso_result:
+ msg = f"SSO identity provider named {name} with ID {sso_id} not found. Cannot delete."
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=True,
+ msg=f"SSO identity provider named {name} deleted"
+ )
+
+ result = merge_two_dicts(result, sso_result)
+ result['signing_certificate'] = "redacted"
+ module.exit_json(**result)
+
+
+def execute_state(module):
+ """Handle states"""
+ state = module.params["state"]
+ try:
+ if state == "stat":
+ handle_stat(module)
+ elif state == "present":
+ handle_present(module)
+ elif state == "absent":
+ handle_absent(module)
+ else:
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
+ finally:
+ system = get_system(module)
+ system.logout()
+
+
+def check_options(module):
+ """Verify module options are sane"""
+ signing_certificate = module.params["signing_certificate"]
+ sign_on_url = module.params["sign_on_url"]
+ state = module.params["state"]
+ is_failed = False
+ msg = ""
+ if state in ["present"]:
+ if not sign_on_url:
+ msg += "A sign_on_url parameter must be provided. "
+ is_failed = True
+ if not signing_certificate:
+ msg += "A signing_certificate parameter must be provided. "
+ is_failed = True
+ if is_failed:
+ module.fail_json(msg=msg)
+
+
+def main():
+ """ Main """
+ argument_spec = infinibox_argument_spec()
+ argument_spec.update(
+ dict(
+ enabled=dict(required=False, type="bool", default=True),
+ issuer=dict(required=False, default=None),
+ name=dict(required=True),
+ sign_on_url=dict(required=False, default=None),
+ signed_assertion=dict(required=False, type="bool", default=False),
+ signed_response=dict(required=False, type="bool", default=False),
+ signing_certificate=dict(required=False, default=None, no_log=True),
+ state=dict(default="present", choices=["stat", "present", "absent"]),
+ )
+ )
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ check_options(module)
+ execute_state(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
index 01bcd0a5f..1915e5d87 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_user.py
@@ -1,16 +1,21 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+""" Manage Infinibox users """
+
+# pylint: disable=invalid-name,use-dict-literal,line-too-long,wrong-import-position
+
from __future__ import (absolute_import, division, print_function)
+
__metaclass__ = type
DOCUMENTATION = r'''
---
module: infini_user
-version_added: '2.9.0'
+version_added: 2.9.0
short_description: Create, Delete and Modify a User on Infinibox
description:
- This module creates, deletes or modifies a user on Infinibox.
@@ -21,7 +26,7 @@ options:
- The new user's Name. Once a user is created, the user_name may not be
changed from this module. It may be changed from the UI or from
infinishell.
- required: true
+ required: false
type: str
user_email:
description:
@@ -52,12 +57,42 @@ options:
type: str
state:
description:
- - Creates/Modifies user when present or removes when absent
+ - Creates/Modifies user when present or removes when absent.
+ - Use state 'login' to test user credentials.
+ - Use state 'reset' to reset a user password.
required: false
default: present
- choices: [ "stat", "reset_password", "present", "absent" ]
+ choices: [ "stat", "reset_password", "present", "absent", "login" ]
type: str
+ user_ldap_group_name:
+ description:
+ - Name of the LDAP user group
+ required: false
+ type: str
+ user_ldap_group_dn:
+ description:
+ - DN of the LDAP user group
+ required: false
+ type: str
+ user_ldap_group_ldap:
+ description:
+ - Name of the LDAP
+ required: false
+ type: str
+ user_ldap_group_role:
+ description:
+ - Role for the LDAP user group
+ choices: [ "admin", "pool_admin", "read_only" ]
+ required: false
+ type: str
+ user_ldap_group_pools:
+ description:
+ - A list of existing pools managed by the LDAP user group
+ default: []
+ required: false
+ type: list
+ elements: str
extends_documentation_fragment:
- infinibox
'''
@@ -81,27 +116,57 @@ EXAMPLES = r'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
- HAS_INFINISDK,
api_wrapper,
infinibox_argument_spec,
get_system,
get_user,
- get_pool,
- unixMillisecondsToDate,
merge_two_dicts,
)
+
+HAS_INFINISDK = True
try:
- from infi.dtypes.iqn import make_iscsi_name
+ from infinisdk.core.exceptions import APICommandFailed
except ImportError:
- pass # Handled by HAS_INFINISDK from module_utils
+ HAS_INFINISDK = False
+
+
+@api_wrapper
+def find_user_ldap_group_id(module):
+ """
+ Find the ID of the LDAP user group by name
+ """
+ ldap_id = None
+ ldap_name = module.params["user_ldap_group_name"]
+ path = f"users?name={ldap_name}&type=eq%3ALdap"
+ system = get_system(module)
+ api_result = system.api.get(path=path)
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ ldap_id = result['id']
+ return ldap_id
+
+
+@api_wrapper
+def find_ldap_id(module):
+ """
+ Find the ID of the LDAP by name
+ """
+ ldap_id = None
+ ldap_name = module.params["user_ldap_group_ldap"]
+ path = f"config/ldap?name={ldap_name}&fields=id"
+ system = get_system(module)
+ api_result = system.api.get(path=path)
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ ldap_id = result['id']
+ return ldap_id
@api_wrapper
def create_user(module, system):
+ """ Create user """
if not module.check_mode:
user = system.users.create(name=module.params['user_name'],
password=module.params['user_password'],
@@ -120,6 +185,42 @@ def create_user(module, system):
return changed
+@api_wrapper
+def create_ldap_user_group(module):
+ """ Create ldap user group """
+ ldap_group_name = module.params['user_ldap_group_name']
+ ldap_name = module.params['user_ldap_group_ldap']
+ ldap_id = find_ldap_id(module)
+ ldap_pools = module.params['user_ldap_group_pools']
+ if not ldap_id:
+ msg = f'Cannot create LDAP group {ldap_group_name}. Cannot find ID for LDAP name {ldap_name}'
+ module.fail_json(msg=msg)
+ path = "users"
+ system = get_system(module)
+ data = {
+ "name": ldap_group_name,
+ "dn": module.params['user_ldap_group_dn'],
+ "ldap_id": ldap_id,
+ "role": module.params['user_ldap_group_role'],
+ "type": "Ldap",
+ }
+ try:
+ system.api.post(path=path, data=data)
+ except APICommandFailed as err:
+ if err.status_code in [409]:
+ msg = f'Cannot create user_ldap_group_name {ldap_group_name}: {err.message}'
+            module.fail_json(msg=msg)
+ changed = True
+
+ user = get_user(module, system, ldap_group_name)
+ for pool_name in ldap_pools:
+ # Pylint is not finding Infinibox.pools but Python does.
+ pool = system.pools.get(name=pool_name) # pylint: disable=no-member
+ add_user_to_pool_owners(user, pool)
+
+ return changed
+
+
def add_user_to_pool_owners(user, pool):
"""
Find the current list of pool owners and add user using pool.set_owners().
@@ -127,27 +228,20 @@ def add_user_to_pool_owners(user, pool):
get owners, add user, then set owners. Further, we need to know if the
owners changed. Use sets of owners to compare.
"""
- # print("add_user_to_pool_owners(): start")
changed = False
pool_fields = pool.get_fields(from_cache=True, raw_value=True)
pool_owners = pool_fields.get('owners', [])
- # print('pool_owners:', pool_owners, 'pool_owners type:', type(pool_owners))
- # print('user:', user)
- # print('pool:', pool)
pool_owners_set = set(pool_owners)
- # print('pool_owners_set:', pool_owners_set)
new_pool_owners_set = pool_owners_set.copy()
new_pool_owners_set.add(user.id)
- # print('new_pool_owners_set:', new_pool_owners_set)
if pool_owners_set != new_pool_owners_set:
pool.set_owners([user])
changed = True
- # print("changed:", changed)
- # print("add_user_to_pool_owners(): end")
return changed
def remove_user_from_pool_owners(user, pool):
+ """ Remove user from pool owners """
changed = False
pool_fields = pool.get_fields(from_cache=True, raw_value=True)
pool_owners = pool_fields.get('owners', [])
@@ -162,9 +256,9 @@ def remove_user_from_pool_owners(user, pool):
@api_wrapper
def update_user(module, system, user):
- # print("update_user()")
+ """ Update user """
if user is None:
- raise AssertionError("Cannot update user {0}. User not found.".format(module.params["user_name"]))
+ raise AssertionError(f"Cannot update user {module.params['user_name']}. User not found.")
changed = False
fields = user.get_fields(from_cache=True, raw_value=True)
@@ -182,23 +276,34 @@ def update_user(module, system, user):
try:
pool_name = module.params['user_pool']
pool = system.pools.get(name=pool_name)
- except Exception as err:
- module.fail_json(msg='Cannot find pool {0}: {1}'.format(pool_name, err))
+ except Exception as err: # pylint: disable=broad-exception-caught
+ module.fail_json(msg=f'Cannot find pool {pool_name}: {err}')
if add_user_to_pool_owners(user, pool):
changed = True
return changed
+def update_ldap_user_group(module):
+ """ Update ldap user group by deleting and creating the LDAP user"""
+ changed = delete_ldap_user_group(module)
+ if not changed:
+        module.fail_json(msg=f"Cannot delete LDAP user group {module.params['user_ldap_group_name']}. Cannot find ID for LDAP group.")
+ create_ldap_user_group(module)
+ changed = True
+ return changed
+
+
@api_wrapper
-def reset_user_password(module, system, user):
- # print("update_user()")
+def reset_user_password(module, user):
+ """ Reset user's password """
if user is None:
- raise AssertionError("Cannot change user {0} password. User not found.".format(module.params["user_name"]))
+ module.fail_json(msg=f'Cannot change user {module.params["user_name"]} password. User not found.')
user.update_password(module.params['user_password'])
@api_wrapper
def delete_user(module, user):
+ """ Delete a user """
if not user:
return False
@@ -209,82 +314,196 @@ def delete_user(module, user):
return changed
-def get_sys_user(module):
+@api_wrapper
+def delete_ldap_user_group(module):
+ """ Delete a ldap user group """
+ changed = False
+ ldap_group_name = module.params['user_ldap_group_name']
+ ldap_group_id = find_user_ldap_group_id(module)
+ if not ldap_group_id:
+ changed = False
+ return changed
+ path = f"users/{ldap_group_id}?approved=yes"
system = get_system(module)
- user = get_user(module, system)
- # print("get_sys_user(): user:", user)
- return (system, user)
+ try:
+ system.api.delete(path=path)
+ changed = True
+ except APICommandFailed as err:
+ if err.status_code in [404]:
+ changed = False
+ else:
+ msg = f'An error occurred deleting user_ldap_group_name {ldap_group_name}: {err}'
+            module.fail_json(msg=msg)
+ return changed
+
+
+def get_user_ldap_group(module):
+ """
+ Find the LDAP user group by name
+ """
+ result = None
+ user_ldap_group_name = module.params["user_ldap_group_name"]
+ path = f"users?name={user_ldap_group_name}&type=eq%3ALdap"
+ system = get_system(module)
+ api_result = system.api.get(path=path)
+ if len(api_result.get_json()['result']) > 0:
+ result = api_result.get_json()['result'][0]
+ return result
def get_user_fields(user):
+ """ Get user's fields """
pools = user.get_owned_pools()
pool_names = [pool.get_field('name') for pool in pools]
fields = user.get_fields(from_cache=True, raw_value=True)
- field_dict = dict(
- id=user.id,
- enabled=fields.get('enabled', None),
- role=fields.get('role', None),
- email=fields.get('email', None),
- pools=pool_names,
- )
+ field_dict = {
+ "dn": fields.get('dn', None),
+ "email": fields.get('email', None),
+ "enabled": fields.get('enabled', None),
+ "id": user.id,
+ "ldap_id": fields.get('ldap_id', None),
+ "pools": pool_names,
+ "role": fields.get('role', None),
+ "roles": fields.get('roles', []),
+ "type": fields.get('type', None),
+ }
return field_dict
def handle_stat(module):
- system, user = get_sys_user(module)
- user_name = module.params["user_name"]
- if not user:
- module.fail_json(msg='User {0} not found'.format(user_name))
- field_dict = get_user_fields(user)
- result = dict(
- changed=False,
- msg='User stat found'
- )
+ """ Handle stat for user or LDAP group user """
+ user_name = module.params['user_name']
+ user_ldap_group_name = module.params['user_ldap_group_name']
+ if user_name:
+ system = get_system(module)
+ user = get_user(module, system)
+ user_name = module.params["user_name"]
+ if not user:
+ module.fail_json(msg=f'User {user_name} not found')
+ field_dict = get_user_fields(user)
+ msg = f'User {user_name} stat found'
+ elif user_ldap_group_name:
+ user = get_user_ldap_group(module)
+ if not user:
+ module.fail_json(msg=f'user_ldap_group_name {user_ldap_group_name} not found')
+ field_dict = get_user_fields(user)
+ msg = f'User LDAP group {user_ldap_group_name} stat found'
+ else:
+ msg = 'Neither user_name nor user_ldap_group_name were provided for state stat'
+        module.fail_json(msg=msg)
+
+ result = {
+ "changed": False,
+ "msg": msg,
+ }
result = merge_two_dicts(result, field_dict)
module.exit_json(**result)
def handle_present(module):
- system, user = get_sys_user(module)
+ """ Handle making user present """
user_name = module.params["user_name"]
- if not user:
- changed = create_user(module, system)
- msg = 'User {0} created'.format(user_name)
- else:
- changed = update_user(module, system, user)
- if changed:
- msg = 'User {0} updated'.format(user_name)
+ user_ldap_group_name = module.params["user_ldap_group_name"]
+ changed = False
+ msg = 'Message not set'
+
+ if user_name:
+ system = get_system(module)
+ user = get_user(module, system)
+ if not user:
+ changed = create_user(module, system)
+ msg = f'User {user_name} created'
+ else:
+ changed = update_user(module, system, user)
+ if changed:
+ msg = f'User {user_name} updated'
+ else:
+ msg = f'User {user_name} update required no changes'
+ elif user_ldap_group_name:
+ ldap_user = get_user_ldap_group(module)
+ if not ldap_user:
+ changed = create_ldap_user_group(module)
+ msg = f'LDAP user group {user_ldap_group_name} created'
else:
- msg = 'User {0} update required no changes'.format(user_name)
+ changed = update_ldap_user_group(module)
+ if changed:
+ msg = f'LDAP user group {user_ldap_group_name} updated by deleting and recreating with updated parameters'
+ else:
+ msg = f'LDAP user group {user_ldap_group_name} update not required - no changes'
+ else:
+ msg = 'Neither user_name nor user_ldap_group_name were provided'
+        module.fail_json(msg=msg)
+
module.exit_json(changed=changed, msg=msg)
def handle_absent(module):
- system, user = get_sys_user(module)
- user_name = module.params["user_name"]
- if not user:
- changed = False
- msg = "User {0} already absent".format(user_name)
+ """ Handle making user absent """
+ user_name = module.params['user_name']
+ user_ldap_group_name = module.params['user_ldap_group_name']
+ if user_name:
+ system = get_system(module)
+ user = get_user(module, system)
+ user_name = module.params["user_name"]
+ if not user:
+ changed = False
+ msg = f"User {user_name} already absent"
+ else:
+ changed = delete_user(module, user)
+ msg = f"User {user_name} removed"
+ module.exit_json(changed=changed, msg=msg)
+ elif user_ldap_group_name:
+ changed = delete_ldap_user_group(module)
+ if changed:
+ msg = f"LDAP group user {user_ldap_group_name} removed"
+ else:
+ msg = f"LDAP group user {user_ldap_group_name} already absent"
+ module.exit_json(changed=changed, msg=msg)
else:
- changed = delete_user(module, user)
- msg = "User {0} removed".format(user_name)
- module.exit_json(changed=changed, msg=msg)
+ msg = 'Neither user_name nor user_ldap_group_name were provided for state absent'
+        module.fail_json(msg=msg)
def handle_reset_password(module):
- system, user = get_sys_user(module)
+ """ Reset user password """
+ system = get_system(module)
+ user = get_user(module, system)
user_name = module.params["user_name"]
if not user:
- msg = 'Cannot change password. User {0} not found'.format(user_name)
+ msg = f'Cannot change password. User {user_name} not found'
module.fail_json(msg=msg)
else:
- reset_user_password(module, system, user)
- msg = 'User {0} password changed'.format(user_name)
+ reset_user_password(module, user)
+ msg = f'User {user_name} password changed'
module.exit_json(changed=True, msg=msg)
+def handle_login(module):
+ """ Test user credentials by logging in """
+ system = get_system(module)
+ user_name = module.params["user_name"]
+ user_password = module.params['user_password']
+ path = "users/login"
+ data = {
+ "username": user_name,
+ "password": user_password,
+ }
+ try:
+ login = system.api.post(path=path, data=data)
+ except APICommandFailed:
+ msg = f'User {user_name} failed to login'
+ module.fail_json(msg=msg)
+ if login.status_code == 200:
+ msg = f'User {user_name} successfully logged in'
+ module.exit_json(changed=False, msg=msg)
+ else:
+ msg = f'User {user_name} failed to login with status code: {login.status_code}'
+ module.fail_json(msg=msg)
+
+
def execute_state(module):
+ """ Find state and handle it """
state = module.params['state']
try:
if state == 'stat':
@@ -295,47 +514,85 @@ def execute_state(module):
handle_absent(module)
elif state == 'reset_password':
handle_reset_password(module)
+ elif state == 'login':
+ handle_login(module)
else:
- module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))
+ module.fail_json(msg=f'Internal handler error. Invalid state: {state}')
finally:
system = get_system(module)
system.logout()
def check_options(module):  # pylint: disable=too-many-branches
    """ Check option logic """
    params = module.params
    state = params['state']
    user_name = params['user_name']
    user_ldap_group_name = params['user_ldap_group_name']

    if state == 'present':
        # pool_admin is the only role that takes (and requires) a pool.
        if params['user_role'] == 'pool_admin' and not params['user_pool']:
            module.fail_json(msg='user_role "pool_admin" requires a user_pool to be provided')
        if params['user_role'] != 'pool_admin' and params['user_pool']:
            module.fail_json(msg='Only user_role "pool_admin" should have a user_pool provided')

        # Exactly one of user_name / user_ldap_group_name must be given.
        if not user_name and not user_ldap_group_name:
            module.fail_json(msg='For state "present", option user_name or user_ldap_group_name is required')
        if user_name and user_ldap_group_name:
            module.fail_json(msg='For state "present", option user_name and user_ldap_group_name cannot both be provided')

        if user_name:
            # A regular user needs an email, password and role.
            for required_param in ('user_email', 'user_password', 'user_role'):
                if params[required_param] is None:
                    msg = f"For state 'present', option {required_param} is required with option user_name"
                    module.fail_json(msg=msg)

        if user_ldap_group_name:
            # An LDAP group user needs its DN, LDAP repository and role.
            for required_param in ('user_ldap_group_dn', 'user_ldap_group_ldap', 'user_ldap_group_role'):
                if not params[required_param]:
                    msg = f'For state "present", option {required_param} is required with option user_ldap_group_name'
                    module.fail_json(msg=msg)
            if params['user_ldap_group_role'] == 'pool_admin' and not params['user_ldap_group_pools']:
                msg = "For state 'present' and user_ldap_group_role 'pool_admin', user_ldap_group_pool must specify one or more pools"
                module.fail_json(msg=msg)

    elif state in ['reset_password', 'login']:
        # Both states authenticate as the user, so both credentials are needed.
        if not params['user_name'] or not params['user_password']:
            msg = f"For state '{state}', user_name and user_password are both required"
            module.fail_json(msg=msg)
def main():
+ """ main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- user_name=dict(required=True),
- user_email=dict(required=False),
- user_password=dict(required=False, no_log=True),
- user_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only']),
+ user_name=dict(required=False),
+ user_email=dict(required=False, default=None),
+ user_password=dict(required=False, no_log=True, default=None),
+ user_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only'], default=None),
user_enabled=dict(required=False, type='bool', default=True),
- user_pool=dict(required=False),
- state=dict(default='present', choices=['stat', 'reset_password', 'present', 'absent']),
+ user_pool=dict(required=False, default=None),
+ user_ldap_group_name=dict(required=False, default=None),
+ user_ldap_group_dn=dict(required=False, default=None),
+ user_ldap_group_ldap=dict(required=False, default=None),
+ user_ldap_group_role=dict(required=False, choices=['admin', 'pool_admin', 'read_only'], default=None),
+ user_ldap_group_pools=dict(required=False, type='list', elements='str', default=[]),
+ state=dict(default='present', choices=['stat', 'reset_password', 'present', 'absent', 'login']),
)
)
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py
new file mode 100644
index 000000000..bbc2a8d12
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_users_repository.py
@@ -0,0 +1,534 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# pylint: disable=invalid-name,use-list-literal,use-dict-literal,line-too-long,wrong-import-position,multiple-statements
+
+"""This module creates, deletes or modifies repositories of users that can log on to an Infinibox."""
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: infini_users_repository
+version_added: 2.13.0
+short_description: Create, Delete or Modify repositories of users that can log on to an Infinibox
+description:
+  - This module creates, deletes or modifies repositories of users that can log on to an Infinibox.
+author: David Ohlemacher (@ohlemacher)
+options:
+ ad_auto_discover_servers:
+ description:
+ - AD auto discover servers
+ type: bool
+ choices: [true, false]
+ required: false
+ default: true
+ ad_domain_name:
+ description:
+ - AD domain name
+ type: str
+ required: false
+ bind_password:
+ description:
+ - The bind user password
+ type: str
+ required: false
+ bind_username:
+ description:
+ - The bind username
+ type: str
+ required: false
+ servers:
+ description:
+ - A list of LDAP servers. For an empty list, use [].
+ required: false
+ type: list
+ elements: str
+ default: []
+ name:
+ description:
+ - Name of repository
+ type: str
+ required: true
+ ldap_port:
+ description:
+ - LDAP or AD port to use
+ type: int
+ required: false
+ default: 636
+ ldap_servers:
+ description:
+ - List of LDAP or AD servers
+ type: list
+ elements: str
+ required: false
+ default: []
+ repository_type:
+ description:
+ - The type of repository
+ choices: ["ActiveDirectory", "LDAP"]
+ type: str
+    required: false
+ schema_group_memberof_attribute:
+ description:
+ - Schema group memberof attribute
+ type: str
+ required: false
+ schema_group_name_attribute:
+ description:
+ - Schema group name attribute
+ type: str
+ required: false
+ schema_groups_basedn:
+ description:
+ - Schema groups base DN
+ type: str
+ required: false
+ schema_group_class:
+ description:
+ - Schema group class
+ type: str
+ required: false
+ schema_users_basedn:
+ description:
+ - Schema user base DN
+ type: str
+ required: false
+ schema_user_class:
+ description:
+ - Schema user class
+ type: str
+ required: false
+ schema_username_attribute:
+ description:
+ - Schema username attribute
+ type: str
+ required: false
+ state:
+ description:
+ - Creates/Modifies users repositories when present or removes when absent.
+ - When getting the stats for a users repository, the module will test
+ connectivity to the repository and report the result in 'test_ok' as true or false.
+ required: false
+ type: str
+ default: present
+ choices: [ "stat", "present", "absent" ]
+ use_ldaps:
+ description:
+ - Use SSL (LDAPS)
+ type: bool
+    choices: [true, false]
+ default: true
+
+extends_documentation_fragment:
+ - infinibox
+"""
+
+EXAMPLES = r"""
+- name: Create AD
+ infini_users_repository:
+ name: PSUS_ANSIBLE_ad
+ bind_password: tuFrAxahuYe4
+ bind_username: conldap
+ ad_domain_name: infinidat.com
+ repository_type: ActiveDirectory
+ schema_group_class: group
+ schema_group_memberof_attribute: memberof
+ schema_group_name_attribute: cn
+ schema_groups_basedn:
+ schema_user_class: user
+ schema_username_attribute: sAMAccountName
+ state: present
+ system: 172.20.67.167
+ user: dohlemacher
+ password: 123456
+
+- name: Stat AD
+ infini_users_repository:
+ name: PSUS_ANSIBLE_ad
+ state: stat
+ user: admin
+ password: secret
+ system: ibox001
+
+- name: Remove AD
+ infini_users_repository:
+ name: PSUS_ANSIBLE_ad
+ state: absent
+ user: admin
+ password: secret
+ system: ibox001
+"""
+
+# RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
+ api_wrapper,
+ get_system,
+ infinibox_argument_spec,
+)
+
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+except ImportError:
+ HAS_INFINISDK = False
+
+
@api_wrapper
def get_users_repository(module, disable_fail=False):
    """
    Find and return users repository information.
    Use disable_fail when we are looking for a users repository
    and it may or may not exist and neither case is an error.
    """
    system = get_system(module)
    name = module.params["name"]

    response = system.api.get(path=f"config/ldap?name={name}")
    if response:
        result = response.get_result()
        if result or disable_fail:
            return result
    elif disable_fail:
        return None

    msg = f"Users repository {name} not found. Cannot stat."
    module.fail_json(msg=msg)
    return None
+
+
@api_wrapper
def test_users_repository(module, repository_id, disable_fail=False):
    """
    Ask the box to test connectivity to a users repository.
    Returns True when the test endpoint answers 200, False otherwise.
    With disable_fail, an API failure is reported as False instead of
    failing the module.
    """
    system = get_system(module)
    name = module.params['name']
    try:
        result = system.api.post(path=f"config/ldap/{repository_id}/test")
    except APICommandFailed as err:
        if disable_fail:
            return False
        module.fail_json(msg=f"Users repository {name} testing failed: {str(err)}")
    return result.response.status_code == 200
+
+
def create_post_data(module):
    """Build the data dict for config/ldap POST rest calls.

    Assembles the fields common to both repository types plus the
    type-specific ones: ActiveDirectory repositories carry a domain name
    and an empty server list; LDAP repositories carry an explicit server
    list and a null domain name.
    """
    name = module.params["name"]
    repo_type = module.params["repository_type"]
    # search_order
    schema_definition = {
        "group_class": module.params["schema_group_class"],
        "group_memberof_attribute": module.params["schema_group_memberof_attribute"],
        "group_name_attribute": module.params["schema_group_name_attribute"],
        "groups_basedn": module.params["schema_groups_basedn"],
        "user_class": module.params["schema_user_class"],
        "username_attribute": module.params["schema_username_attribute"],
        "users_basedn": module.params["schema_users_basedn"],
    }

    # Create json data
    data = {
        "bind_password": module.params["bind_password"],
        "bind_username": module.params["bind_username"],
        "ldap_port": module.params["ldap_port"],
        "name": name,
        "repository_type": repo_type,
        "schema_definition": schema_definition,
        "use_ldaps": module.params["use_ldaps"],
    }

    # Add type specific fields to data dict
    if repo_type == "ActiveDirectory":
        data["domain_name"] = module.params["ad_domain_name"]
        data["servers"] = []
    else:  # LDAP
        # Bug fix: was `data["domain_name"]: None`, an annotation statement
        # that never set the key, so the LDAP payload lacked domain_name and
        # comparisons against existing repos (which report it) raised KeyError.
        data["domain_name"] = None
        data["servers"] = module.params["ldap_servers"]
    return data
+
+
@api_wrapper
def post_users_repository(module):
    """
    Create or update a users LDAP or AD repo. The changed variable is found elsewhere.
    Variable 'changed' not returned by design.
    """
    system = get_system(module)
    name = module.params["name"]
    try:
        system.api.post(path="config/ldap", data=create_post_data(module))
    except APICommandFailed as err:
        # Map the API error code to a user-facing failure message.
        if err.error_code == "LDAP_NAME_CONFLICT":
            msg = f"Users repository {name} conflicts."
        elif err.error_code == "LDAP_BAD_CREDENTIALS":
            msg = f"Cannot create users repository {name} due to incorrect LDAP credentials: {err}"
        else:
            msg = f"Cannot create users repository {name}: {err}"
        module.fail_json(msg=msg)
+
+
@api_wrapper
def delete_users_repository(module):
    """Delete the users repository if it exists. Returns True when deleted."""
    system = get_system(module)
    name = module.params['name']
    if module.check_mode:
        return False
    repo = get_users_repository(module, disable_fail=True)
    if not repo or len(repo) != 1:
        return False
    changed = False
    try:
        system.api.delete(path=f"config/ldap/{repo[0]['id']}")
        changed = True
    except APICommandFailed as err:
        # 404 means it is already gone, which is not an error here.
        if err.status_code != 404:
            module.fail_json(msg=f"Deletion of users repository {name} failed: {str(err)}")
    return changed
+
+
def handle_stat(module):
    """Exit the module with stats for the named users repository."""
    name = module.params['name']
    repos = get_users_repository(module)

    if len(repos) != 1:
        module.fail_json(msg=f"Users repository {name} not found in repository list {repos}. Cannot stat.")

    result = repos[0]
    repository_id = result.pop("id")  # Rename id to repository_id
    result.update(
        msg=f"Stats for user repository {name}",
        repository_id=repository_id,
        test_ok=test_users_repository(module, repository_id=repository_id, disable_fail=True),
        changed=False,
    )
    module.exit_json(**result)
+
+
@api_wrapper
def is_existing_users_repo_equal_to_desired(module):
    """Compare the existing users repository to the desired (module param) state.

    Returns True only when every tracked field matches. Returns False when
    the repository does not exist.
    """
    newdata = create_post_data(module)
    existing = get_users_repository(module, disable_fail=True)
    if not existing:
        # Bug fix: previously indexed [0] before the falsy check, which
        # raised TypeError (None) or IndexError ([]) when the repository
        # was absent instead of returning False.
        return False
    olddata = existing[0]
    compared_keys = (
        "bind_username", "repository_type", "domain_name", "ldap_port",
        "name", "schema_definition", "servers", "use_ldaps",
    )
    return all(olddata[key] == newdata[key] for key in compared_keys)
+
+
def handle_present(module):
    """Make users repository present."""
    name = module.params['name']
    changed = False
    msg = ""
    if module.check_mode:
        module.exit_json(changed=changed, msg=f"Users repository {name} unchanged due to check_mode")

    prior_repo = None
    prior_result = get_users_repository(module, disable_fail=True)
    if prior_result:
        prior_repo = prior_result[0]
        if is_existing_users_repo_equal_to_desired(module):
            module.exit_json(changed=changed, msg=f"Users repository {name} already exists. No changes required.")
        # Settings differ: drop the old repository and recreate it below.
        msg = f"Users repository {name} is being recreated with new settings. "
        delete_users_repository(module)
        prior_repo = None
        changed = True

    post_users_repository(module)

    current_repo = get_users_repository(module)
    changed = current_repo != prior_repo
    if changed:
        if prior_repo:
            msg = f"{msg}Users repository {name} updated"
        else:
            msg = f"{msg}Users repository {name} created"
    else:
        msg = f"Users repository {name} unchanged since the value is the same as the existing users repository"
    module.exit_json(changed=changed, msg=msg)
+
+
def handle_absent(module):
    """Make users repository absent."""
    name = module.params['name']
    if module.check_mode:
        module.exit_json(changed=False, msg=f"Users repository {name} unchanged")
    changed = delete_users_repository(module)
    if changed:
        msg = f"Users repository {name} removed"
    else:
        msg = f"Users repository {name} did not exist so removal was unnecessary"
    module.exit_json(changed=changed, msg=msg)
+
+
def execute_state(module):
    """Determine which state function to execute and do so; always log out."""
    state = module.params["state"]
    handlers = {
        "stat": handle_stat,
        "present": handle_present,
        "absent": handle_absent,
    }
    try:
        handler = handlers.get(state)
        if handler is None:
            module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
        else:
            handler(module)
    finally:
        system = get_system(module)
        system.logout()
+
+
def check_options(module):
    """Verify module options are sane.

    State "present" requires a repository_type, and each repository type
    has its own required and disallowed parameters. States "stat" and
    "absent" need no validation beyond the argument spec.
    """
    name = module.params["name"]
    repository_type = module.params["repository_type"]
    state = module.params["state"]

    if state in ("stat", "absent"):
        return
    if state == "present":
        if not repository_type:
            msg = "Cannot create a new users repository without providing a repository_type"
            module.fail_json(msg=msg)

        # Parameters required by both LDAP and ActiveDirectory repositories.
        common_params = [
            "bind_password", "bind_username", "schema_group_class",
            "schema_group_memberof_attribute", "schema_group_name_attribute",
            "schema_user_class", "schema_username_attribute",
        ]
        if repository_type == "LDAP":
            # NOTE(review): ad_auto_discover_servers defaults to true, so
            # creating an LDAP repo requires explicitly setting it to false
            # to pass this check — confirm this is intended.
            disallowed_params = ["ad_domain_name", "ad_auto_discover_servers"]
        elif repository_type == "ActiveDirectory":
            disallowed_params = ["ldap_servers"]
        else:
            # Typo fix: was "respository".
            msg = f"Unsupported repository type: {repository_type}"
            module.fail_json(msg=msg)

        missing_params = [param for param in common_params if not is_set_in_params(module, param)]
        if missing_params:
            # Bug fix: the ActiveDirectory branch previously said "LDAP repository".
            msg = f"Cannot create a new {repository_type} repository named {name} without providing required parameters: {missing_params}"
            module.fail_json(msg=msg)

        error_params = [param for param in disallowed_params if is_set_in_params(module, param)]
        if error_params:
            msg = f"Cannot create a new {repository_type} repository named {name} when providing disallowed parameters: {error_params}"
            module.fail_json(msg=msg)
    else:
        # Bug fix: pass the message via msg= as fail_json expects.
        module.fail_json(msg=f"Invalid state '{state}' provided")
+
+
def is_set_in_params(module, key):
    """A utility function to test if a module param key is set to a truthy value.
    Useful in list comprehensions."""
    try:
        return bool(module.params[key])
    except KeyError:
        return False
+
+
def main():
    """Entry point: build the argument spec, validate options, run the state handler."""
    argument_spec = infinibox_argument_spec()

    argument_spec.update(
        dict(
            ad_auto_discover_servers=dict(required=False, choices=[True, False], type="bool", default=True),
            ad_domain_name=dict(required=False, default=None),
            bind_password=dict(required=False, default=None, no_log=True),
            bind_username=dict(required=False, default=None),
            ldap_servers=dict(required=False, default=[], type="list", elements="str"),
            name=dict(required=True),
            ldap_port=dict(required=False, type="int", default=636),
            repository_type=dict(required=False, choices=["LDAP", "ActiveDirectory"], default=None),
            schema_group_class=dict(required=False, default=None),
            schema_group_memberof_attribute=dict(required=False, default=None),
            schema_group_name_attribute=dict(required=False, default=None),
            schema_groups_basedn=dict(required=False, default=None),
            schema_user_class=dict(required=False, default=None),
            schema_username_attribute=dict(required=False, default=None),
            schema_users_basedn=dict(required=False, default=None),
            servers=dict(required=False, default=[], type="list", elements="str"),
            state=dict(default="present", choices=["stat", "present", "absent"]),
            use_ldaps=dict(required=False, choices=[True, False], type="bool", default=True),
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_INFINISDK:
        module.fail_json(msg=missing_required_lib("infinisdk"))

    check_options(module)
    execute_state(module)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py b/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
index 0c4a579bc..df5b0e756 100644
--- a/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
+++ b/ansible_collections/infinidat/infinibox/plugins/modules/infini_vol.py
@@ -1,14 +1,18 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright: (c) 2022, Infinidat <info@infinidat.com>
+# pylint: disable=invalid-name,use-dict-literal,too-many-branches,too-many-locals,line-too-long,wrong-import-position
+
+""" A module for managing Infinibox volumes """
+
+# Copyright: (c) 2024, Infinidat <info@infinidat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
+from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: infini_vol
version_added: '2.3.0'
@@ -19,24 +23,34 @@ author: David Ohlemacher (@ohlemacher)
options:
name:
description:
- - Volume Name
- required: true
+ - Volume name.
+ type: str
+ required: false
+ serial:
+ description:
+ - Volume serial number.
+ type: str
+ required: false
parent_volume_name:
description:
- Specify a volume name. This is the volume parent for creating a snapshot. Required if volume_type is snapshot.
+ type: str
required: false
pool:
description:
- Pool that master volume will reside within. Required for creating a master volume, but not a snapshot.
+ type: str
required: false
size:
description:
- Volume size in MB, GB or TB units. Required for creating a master volume, but not a snapshot
+ type: str
required: false
snapshot_lock_expires_at:
description:
- This will cause a snapshot to be locked at the specified date-time.
Uses python's datetime format YYYY-mm-dd HH:MM:SS.ffffff, e.g. 2020-02-13 16:21:59.699700
+ type: str
required: false
snapshot_lock_only:
description:
@@ -47,6 +61,7 @@ options:
state:
description:
- Creates/Modifies master volume or snapshot when present or removes when absent.
+ type: str
required: false
default: present
choices: [ "stat", "present", "absent" ]
@@ -59,12 +74,14 @@ options:
write_protected:
description:
- Specifies if the volume should be write protected. Default will be True for snapshots, False for regular volumes.
+ type: str
required: false
default: "Default"
choices: ["Default", "True", "False"]
volume_type:
description:
- Specifies the volume type, regular volume or snapshot.
+ type: str
required: false
default: master
choices: [ "master", "snapshot" ]
@@ -72,21 +89,23 @@ options:
description:
- Specify true to restore a volume (parent_volume_name) from an existing snapshot specified by the name field.
- State must be set to present and volume_type must be 'snapshot'.
+ type: bool
required: false
default: false
+
extends_documentation_fragment:
- infinibox
requirements:
- capacity
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Create new volume named foo under pool named bar
infini_vol:
name: foo
# volume_type: master # Default
size: 1TB
- thin_provision: yes
+ thin_provision: true
pool: bar
state: present
user: admin
@@ -115,25 +134,30 @@ EXAMPLES = r'''
user: admin
password: secret
system: ibox001
-'''
+"""
# RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-
from ansible_collections.infinidat.infinibox.plugins.module_utils.infinibox import (
HAS_INFINISDK,
api_wrapper,
- infinibox_argument_spec,
- ObjectNotFound,
+ check_snapshot_lock_options,
get_pool,
get_system,
+ get_vol_by_sn,
get_volume,
- get_vol_sn,
+ infinibox_argument_spec,
+ manage_snapshot_locks,
)
+HAS_INFINISDK = True
+try:
+ from infinisdk.core.exceptions import APICommandFailed
+ from infinisdk.core.exceptions import ObjectNotFound
+except ImportError:
+ HAS_INFINISDK = False
HAS_CAPACITY = True
try:
@@ -141,19 +165,10 @@ try:
except ImportError:
HAS_CAPACITY = False
-HAS_ARROW = True
-try:
- import arrow
-except ImportError:
- HAS_ARROW = False
-
-except Exception:
- HAS_INFINISDK = False
-
@api_wrapper
def create_volume(module, system):
- """Create Volume"""
+ """ Create Volume """
changed = False
if not module.check_mode:
if module.params["thin_provision"]:
@@ -179,24 +194,21 @@ def create_volume(module, system):
@api_wrapper
def find_vol_id(module, system, vol_name):
    """Find and return the ID of the volume named vol_name.

    Fails the module unless the query matches exactly one volume.
    """
    vol_url = f"volumes?name={vol_name}&fields=id"
    vol = system.api.get(path=vol_url)
    result = vol.get_json()["result"]
    if len(result) != 1:
        # Fix: pass the message via msg= so AnsibleModule reports it
        # correctly, matching every other fail_json call in this module.
        module.fail_json(msg=f"Cannot find a volume with name '{vol_name}'")
    return result[0]["id"]
@api_wrapper
def restore_volume_from_snapshot(module, system):
- """Use snapshot to restore a volume"""
+ """ Use snapshot to restore a volume """
changed = False
is_restoring = module.params["restore_volume_from_snapshot"]
volume_type = module.params["volume_type"]
@@ -209,62 +221,62 @@ def restore_volume_from_snapshot(module, system):
if not is_restoring:
raise AssertionError("A programming error occurred. is_restoring is not True")
if volume_type != "snapshot":
- module.fail_json(
- msg="Cannot restore a parent volume from snapshot unless the volume "
- "type is 'snapshot'"
- )
+ module.exit_json(msg="Cannot restore a parent volume from snapshot unless the volume type is 'snapshot'")
if not parent_volume_name:
- module.fail_json(
- msg="Cannot restore a parent volume from snapshot unless the parent "
- "volume name is specified"
- )
+ module.exit_json(msg="Cannot restore a parent volume from snapshot unless the parent volume name is specified")
if not module.check_mode:
- restore_url = "volumes/{0}/restore?approved=true".format(parent_volume_id)
+ restore_url = f"volumes/{parent_volume_id}/restore?approved=true"
restore_data = {
"source_id": snap_id,
}
- restore = system.api.post(path=restore_url, data=restore_data)
- changed = True
+ try:
+ system.api.post(path=restore_url, data=restore_data)
+ changed = True
+ except APICommandFailed as err:
+ module.fail_json(msg=f"Cannot restore volume {parent_volume_name} from {snap_name}: {err}")
return changed
@api_wrapper
def update_volume(module, volume):
    """Apply requested size/provisioning/write-protection changes to a volume.

    Returns True when any field was modified; check_mode changes nothing.
    """
    if module.check_mode:
        return False

    changed = False

    requested_size = module.params["size"]
    if requested_size:
        size = Capacity(requested_size).roundup(64 * KiB)
        if volume.get_size() != size:
            volume.update_size(size)
            changed = True

    thin = module.params["thin_provision"]
    if thin is not None:
        provisioning = str(volume.get_provisioning())
        if provisioning == "THICK" and thin:
            volume.update_provisioning("THIN")
            changed = True
        if provisioning == "THIN" and not thin:
            volume.update_provisioning("THICK")
            changed = True

    desired_write_prot = module.params["write_protected"]
    if desired_write_prot is not None:
        if volume.is_write_protected() != desired_write_prot:
            volume.update_field("write_protected", desired_write_prot)
            changed = True

    return changed
@api_wrapper
def delete_volume(module, volume):
    """ Delete Volume. Volume could be a snapshot. """
    if module.check_mode:
        return False
    volume.delete()
    return True
@api_wrapper
@@ -274,15 +286,11 @@ def create_snapshot(module, system):
parent_volume_name = module.params["parent_volume_name"]
try:
parent_volume = system.volumes.get(name=parent_volume_name)
- except ObjectNotFound as err:
- msg = "Cannot create snapshot {0}. Parent volume {1} not found".format(
- snapshot_name, parent_volume_name
- )
+ except ObjectNotFound:
+ msg = f"Cannot create snapshot {snapshot_name}. Parent volume {parent_volume_name} not found"
module.fail_json(msg=msg)
if not parent_volume:
- msg = "Cannot find new snapshot's parent volume named {0}".format(
- parent_volume_name
- )
+ msg = f"Cannot find new snapshot's parent volume named {parent_volume_name}"
module.fail_json(msg=msg)
if not module.check_mode:
if module.params["snapshot_lock_only"]:
@@ -304,9 +312,7 @@ def create_snapshot(module, system):
@api_wrapper
def update_snapshot(module, snapshot):
- """
- Update/refresh snapshot. May also lock it.
- """
+ """ Update/refresh snapshot. May also lock it. """
refresh_changed = False
if not module.params["snapshot_lock_only"]:
snap_is_locked = snapshot.get_lock_state() == "LOCKED"
@@ -331,88 +337,18 @@ def update_snapshot(module, snapshot):
return refresh_changed or lock_changed
-def get_sys_pool_vol_parname(module):
+def handle_stat(module):
+ """ Handle the stat state """
system = get_system(module)
- pool = get_pool(module, system)
- if module.params["name"]:
+ if module.params['name']:
volume = get_volume(module, system)
else:
- volume = get_vol_sn(module, system)
- parname = module.params["parent_volume_name"]
- return (system, pool, volume, parname)
-
-
-def check_snapshot_lock_options(module):
- """
- Check if specified options are feasible for a snapshot.
-
- Prevent very long lock times.
- max_delta_minutes limits locks to 30 days (43200 minutes).
-
- This functionality is broken out from manage_snapshot_locks() to allow
- it to be called by create_snapshot() before the snapshot is actually
- created.
- """
- snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
-
- if snapshot_lock_expires_at: # Then user has specified wish to lock snap
- lock_expires_at = arrow.get(snapshot_lock_expires_at)
-
- # Check for lock in the past
- now = arrow.utcnow()
- if lock_expires_at <= now:
- msg = "Cannot lock snapshot with a snapshot_lock_expires_at "
- msg += "of '{0}' from the past".format(snapshot_lock_expires_at)
- module.fail_json(msg=msg)
-
- # Check for lock later than max lock, i.e. too far in future.
- max_delta_minutes = 43200 # 30 days in minutes
- max_lock_expires_at = now.shift(minutes=max_delta_minutes)
- if lock_expires_at >= max_lock_expires_at:
- msg = "snapshot_lock_expires_at exceeds {0} days in the future".format(
- max_delta_minutes // 24 // 60
- )
- module.fail_json(msg=msg)
-
-
-def manage_snapshot_locks(module, snapshot):
- """
- Manage the locking of a snapshot. Check for bad lock times.
- See check_snapshot_lock_options() which has additional checks.
- """
- name = module.params["name"]
- snapshot_lock_expires_at = module.params["snapshot_lock_expires_at"]
- snap_is_locked = snapshot.get_lock_state() == "LOCKED"
- current_lock_expires_at = snapshot.get_lock_expires_at()
- changed = False
-
- check_snapshot_lock_options(module)
-
- if snapshot_lock_expires_at: # Then user has specified wish to lock snap
- lock_expires_at = arrow.get(snapshot_lock_expires_at)
- if snap_is_locked and lock_expires_at < current_lock_expires_at:
- # Lock earlier than current lock
- msg = "snapshot_lock_expires_at '{0}' preceeds the current lock time of '{1}'".format(
- lock_expires_at, current_lock_expires_at
- )
- module.fail_json(msg=msg)
- elif snap_is_locked and lock_expires_at == current_lock_expires_at:
- # Lock already set to correct time
- pass
- else:
- # Set lock
- if not module.check_mode:
- snapshot.update_lock_expires_at(lock_expires_at)
- changed = True
- return changed
-
-
-def handle_stat(module):
- system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ volume = get_vol_by_sn(module, system)
if not volume:
- msg = "Volume {0} not found. Cannot stat.".format(module.params["name"])
+ msg = f"Volume {module.params['name']} not found. Cannot stat."
module.fail_json(msg=msg)
fields = volume.get_fields() # from_cache=True, raw_value=True)
+
created_at = str(fields.get("created_at", None))
has_children = fields.get("has_children", None)
lock_expires_at = str(volume.get_lock_expires_at())
@@ -429,7 +365,7 @@ def handle_stat(module):
volume_type = fields.get("type", None)
write_protected = fields.get("write_protected", None)
if volume_type == "SNAPSHOT":
- msg = "Snapshot stat found"
+ msg = "Volume snapshot stat found"
else:
msg = "Volume stat found"
@@ -456,7 +392,12 @@ def handle_stat(module):
def handle_present(module):
- system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ """ Handle the present state """
+ system = get_system(module)
+ if module.params["name"]:
+ volume = get_volume(module, system)
+ else:
+ volume = get_vol_by_sn(module, system)
volume_type = module.params["volume_type"]
is_restoring = module.params["restore_volume_from_snapshot"]
if volume_type == "master":
@@ -465,7 +406,11 @@ def handle_present(module):
module.exit_json(changed=changed, msg="Volume created")
else:
changed = update_volume(module, volume)
- module.exit_json(changed=changed, msg="Volume updated")
+ if changed:
+ msg = "Volume updated"
+ else:
+ msg = "Volume present. No changes were required"
+ module.exit_json(changed=changed, msg=msg)
elif volume_type == "snapshot":
snapshot = volume
if is_restoring:
@@ -484,7 +429,12 @@ def handle_present(module):
def handle_absent(module):
- system, pool, volume, parname = get_sys_pool_vol_parname(module)
+ """ Handle the absent state """
+ system = get_system(module)
+ if module.params["name"]:
+ volume = get_volume(module, system)
+ else:
+ volume = get_vol_by_sn(module, system)
volume_type = module.params["volume_type"]
if volume and volume.get_lock_state() == "LOCKED":
@@ -498,10 +448,10 @@ def handle_absent(module):
changed = delete_volume(module, volume)
module.exit_json(changed=changed, msg="Volume removed")
elif volume_type == "snapshot":
- if not volume:
+ snapshot = volume
+ if not snapshot:
module.exit_json(changed=False, msg="Snapshot already absent")
else:
- snapshot = volume
changed = delete_volume(module, snapshot)
module.exit_json(changed=changed, msg="Snapshot removed")
else:
@@ -509,7 +459,7 @@ def handle_absent(module):
def execute_state(module):
- # Handle different write_protected defaults depending on volume_type.
+ """ Handle each state. Handle different write_protected defaults depending on volume_type. """
if module.params["volume_type"] == "snapshot":
if module.params["write_protected"] in ["True", "true", "Default"]:
module.params["write_protected"] = True
@@ -521,8 +471,8 @@ def execute_state(module):
else:
module.params["write_protected"] = True
else:
- msg = f"An error has occurred handling volume_type '{module.params['volume_type']}' or write_protected '{module.params['write_protected']}' values"
- module.fail_json(msg=msg)
+ msg = f"An error has occurred handling volume_type {module.params['volume_type']} or write_protected {module.params['write_protected']} values"
+ module.fail_json(msg)
state = module.params["state"]
try:
@@ -533,9 +483,7 @@ def execute_state(module):
elif state == "absent":
handle_absent(module)
else:
- module.fail_json(
- msg="Internal handler error. Invalid state: {0}".format(state)
- )
+ module.fail_json(msg=f"Internal handler error. Invalid state: {state}")
finally:
system = get_system(module)
system.logout()
@@ -543,22 +491,32 @@ def execute_state(module):
def check_options(module):
"""Verify module options are sane"""
+ name = module.params["name"]
+ serial = module.params["serial"]
state = module.params["state"]
size = module.params["size"]
pool = module.params["pool"]
volume_type = module.params["volume_type"]
parent_volume_name = module.params["parent_volume_name"]
+ if state == "stat":
+ if not name and not serial:
+ msg = "Name or serial parameter must be provided"
+ module.fail_json(msg)
+ if state in ["present", "absent"]:
+ if not name:
+ msg = "Name parameter must be provided"
+ module.fail_json(msg=msg)
+
if state == "present":
if volume_type == "master":
- if state == "present":
- if parent_volume_name:
- msg = "parent_volume_name should not be specified "
- msg += "if volume_type is 'volume'. Snapshots only."
- module.fail_json(msg=msg)
- if not size:
- msg = "Size is required to create a volume"
- module.fail_json(msg=msg)
+ if parent_volume_name:
+ msg = "parent_volume_name should not be specified "
+ msg += "if volume_type is 'master'. Used for snapshots only."
+ module.fail_json(msg=msg)
+ if not size:
+ msg = "Size is required to create a volume"
+ module.fail_json(msg=msg)
elif volume_type == "snapshot":
if size or pool:
msg = "Neither pool nor size should not be specified "
@@ -572,26 +530,28 @@ def check_options(module):
else:
msg = "A programming error has occurred"
module.fail_json(msg=msg)
+ if not pool and volume_type == "master":
+ msg = "For state 'present', pool is required"
+ module.fail_json(msg=msg)
def main():
+ """ Main """
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
- name=dict(required=False),
- parent_volume_name=dict(default=None, required=False, type=str),
+ name=dict(required=False, default=None),
+ parent_volume_name=dict(default=None, required=False, type="str"),
pool=dict(required=False),
- size=dict(),
- serial=dict(),
+ restore_volume_from_snapshot=dict(default=False, type="bool"),
+ serial=dict(required=False, default=None),
+ size=dict(required=False, default=None),
snapshot_lock_expires_at=dict(),
- snapshot_lock_only=dict(type="bool", default=False),
+ snapshot_lock_only=dict(default=False, type="bool"),
state=dict(default="present", choices=["stat", "present", "absent"]),
thin_provision=dict(type="bool", default=True),
- write_protected=dict(
- default="Default", choices=["Default", "True", "False"]
- ),
volume_type=dict(default="master", choices=["master", "snapshot"]),
- restore_volume_from_snapshot=dict(default=False, type=bool),
+ write_protected=dict(default="Default", choices=["Default", "True", "False"]),
)
)
@@ -600,16 +560,14 @@ def main():
if not HAS_INFINISDK:
module.fail_json(msg=missing_required_lib("infinisdk"))
- if not HAS_ARROW:
- module.fail_json(msg=missing_required_lib("arrow"))
+ if not HAS_CAPACITY:
+ module.fail_json(msg=missing_required_lib("capacity"))
if module.params["size"]:
try:
Capacity(module.params["size"])
- except Exception:
- module.fail_json(
- msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units"
- )
+ except Exception: # pylint: disable=broad-exception-caught
+ module.fail_json(msg="size (Physical Capacity) should be defined in MB, GB, TB or PB units")
check_options(module)
execute_state(module)
diff --git a/ansible_collections/infinidat/infinibox/requirements-dev.txt b/ansible_collections/infinidat/infinibox/requirements-dev.txt
index c77aeebbf..29818807d 100644
--- a/ansible_collections/infinidat/infinibox/requirements-dev.txt
+++ b/ansible_collections/infinidat/infinibox/requirements-dev.txt
@@ -4,3 +4,4 @@ pycodestyle
pylint
pytest
PyYAML
+black
diff --git a/ansible_collections/infinidat/infinibox/scripts/syslog.log b/ansible_collections/infinidat/infinibox/scripts/syslog.log
new file mode 100644
index 000000000..973757b3a
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/scripts/syslog.log
@@ -0,0 +1,437 @@
+<190>JAN 17 20:40:23 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 20:40:23 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 20:40:23 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 20:45:42 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 20:45:42 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 20:45:43 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 20:46:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 20:46:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 20:46:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 20:46:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 20:46:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 20:46:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 20:52:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGGED_OUT, seq_num=8823, level=INFO, username=admin, system=vbox-ps-01, description=User 'admin' logged out
+<190>Jan 17 20:52:18 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8824, level=INFO, username=admin, system=vbox-ps-01, description=User 'admin' successfully logged in from IP 172.31.5.232
+<190>Jan 17 21:00:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8825, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:00:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8826, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:06:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8827, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:06:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8828, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:17:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8829, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:17:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8830, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:23:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8831, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:23:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8832, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:34:33 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8833, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:34:34 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8834, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8835, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8836, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:51:50 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8837, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:51:51 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_LOGIN_SUCCESS, seq_num=8838, level=INFO, username=infinidat, system=vbox-ps-01, description=User 'infinidat' successfully logged in from IP 172.20.89.195
+<190>Jan 17 21:57:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8839, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 21:57:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8840, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 22:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8847, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 22:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=8848, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>JAN 17 22:36:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 22:36:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 22:36:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 22:40:28 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 22:40:28 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 22:40:28 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 22:40:31 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=8933, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog' created
+<190>Jan 17 22:40:43 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=8944, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>JAN 17 22:48:49 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 22:48:49 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 22:48:49 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 22:48:52 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=8989, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog' created
+<190>Jan 17 23:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9007, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 23:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9008, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 23:08:40 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9017, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:08:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9034, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:09:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9035, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 17 23:12:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9041, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:12:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9042, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:12:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9043, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:12:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9044, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:12:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9045, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:12:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9051, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:12:32 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9068, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:12:32 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9069, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 17 23:14:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9076, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:14:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9077, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:14:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9078, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:14:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9079, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:14:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9080, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:14:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9081, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' updated
+<190>Jan 17 23:14:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9087, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:15:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9104, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:15:16 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9105, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>JAN 17 23:15:25 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:15:25 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:15:25 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 23:15:40 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9128, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 17 23:15:43 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9131, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' deleted
+<190>Jan 17 23:17:37 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9134, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' created
+<190>Jan 17 23:17:41 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9137, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' deleted
+<190>Jan 17 23:17:43 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9140, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:17:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9158, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:18:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9159, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>JAN 17 23:18:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:18:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:18:09 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 23:18:27 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9187, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' deleted
+<190>Jan 17 23:23:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9189, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 23:23:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9190, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 23:31:20 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9199, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:31:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9216, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:31:37 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9217, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>JAN 17 23:31:48 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:31:48 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:31:48 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 23:36:24 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:36:24 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:36:24 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 23:36:27 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:36:27 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:36:27 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 23:36:33 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9283, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:36:33 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9283, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:39:57 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9299, level=INFO, username=admin, system=vbox-ps-01, description=Notification rule 'graylog' deleted
+<190>Jan 17 23:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9300, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 23:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9301, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 17 23:40:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_TARGET_DELETED, seq_num=9302, level=INFO, username=admin, system=vbox-ps-01, description=SYSLOG notification target 'graylog_target' deleted
+<190>Jan 17 23:40:29 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9311, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:40:45 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9328, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:40:46 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9329, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>JAN 17 23:40:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:40:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:40:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 23:41:02 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:41:02 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:41:02 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 23:41:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9351, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog' created
+<190>Jan 17 23:41:11 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9354, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:41:11 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9354, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9357, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9357, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9358, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9358, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9359, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9359, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9360, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9360, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9361, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9361, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:41:29 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9375, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' deleted
+<190>Jan 17 23:41:29 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9375, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' deleted
+<190>Jan 17 23:43:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9378, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' created
+<190>Jan 17 23:43:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9378, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' created
+<190>Jan 17 23:43:17 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9381, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' deleted
+<190>Jan 17 23:43:17 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9381, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' deleted
+<190>Jan 17 23:43:19 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9385, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:43:19 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9385, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:43:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9402, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:43:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9402, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:43:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9403, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 17 23:43:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9403, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 17 23:43:42 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9408, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog' deleted
+<190>JAN 17 23:43:50 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:43:50 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:43:50 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 23:43:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9428, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog' created
+<190>Jan 17 23:44:02 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9431, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:44:02 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9431, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9435, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9435, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9436, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9436, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9437, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9437, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9438, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:05 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9438, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:44:17 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9449, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 17 23:44:17 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9449, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 17 23:54:03 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9464, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:54:03 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9464, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 17 23:54:19 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9481, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:54:19 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9481, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 17 23:54:20 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9482, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 17 23:54:20 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9482, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 17 23:54:25 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9487, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog' deleted
+<190>JAN 17 23:54:34 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:54:34 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:54:34 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 23:57:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:57:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:57:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 17 23:58:03 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 17 23:58:03 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 17 23:58:03 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 17 23:58:12 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9564, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:58:12 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9564, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9567, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9567, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9568, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9568, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9569, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9569, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9570, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9570, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9571, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:15 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_UPDATED, seq_num=9571, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' updated
+<190>Jan 17 23:58:27 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9582, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 17 23:58:27 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9582, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 18 00:00:16 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9587, level=INFO, username=admin, system=vbox-ps-01, description=Notification rule 'syslog' deleted
+<190>Jan 18 00:00:52 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9589, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' created
+<190>Jan 18 00:00:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=9592, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' deleted
+<190>Jan 18 00:00:57 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9596, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 18 00:01:13 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=9613, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 18 00:01:14 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=9614, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>JAN 18 00:01:30 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 00:01:30 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 00:01:30 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 18 00:02:43 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 00:02:44 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 00:02:44 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 18 00:02:46 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 00:02:46 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 00:02:46 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 18 00:05:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 00:05:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 00:05:36 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 18 00:05:39 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 00:05:39 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 00:05:39 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 18 00:05:51 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9752, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 18 00:05:51 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=9752, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 18 00:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9770, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9770, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9771, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9771, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9777, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9777, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9778, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9778, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9781, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9781, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9782, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 00:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9782, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9785, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9785, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9786, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9786, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9789, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9789, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9790, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9790, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9793, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9793, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9794, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:40:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9794, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:57:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9797, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:57:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9797, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:57:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9798, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 01:57:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9798, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9801, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9801, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9802, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:14:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9802, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9805, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9806, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:31:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9806, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9809, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9809, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9810, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 02:48:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9810, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9813, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9813, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9814, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:05:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9814, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9817, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9817, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9818, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:22:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9818, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:39:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9821, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:39:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9821, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:39:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9822, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:39:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9822, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9825, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9825, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9826, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 03:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9826, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9829, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9829, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9830, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9830, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9833, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9833, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9834, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9834, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9837, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9837, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9838, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 04:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9838, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9841, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9841, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9842, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9842, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9845, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9845, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9846, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9846, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9849, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9849, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9850, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9850, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9853, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9853, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9854, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 05:56:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9854, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9857, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9857, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9858, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:13:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9858, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9861, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9861, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9862, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:30:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9862, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9865, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9865, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9866, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 06:47:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9866, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9869, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9869, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9870, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:04:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9870, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9873, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9873, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9874, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:21:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9874, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9877, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9877, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9878, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:38:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9878, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:55:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9881, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:55:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9881, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:55:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9882, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 07:55:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9882, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:12:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9885, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:12:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9885, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:12:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9886, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:12:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9886, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:29:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9889, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:29:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9889, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:29:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9890, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:29:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9890, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:46:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9891, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:46:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9891, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:46:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9892, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 08:46:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9892, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:03:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9895, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:03:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9895, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:03:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9896, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:03:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9896, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:20:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9899, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:20:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9899, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:20:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9900, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:20:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9900, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:37:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9903, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:37:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9903, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:37:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9904, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:37:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9904, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:54:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9907, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:54:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9907, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:54:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9908, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 09:54:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9908, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:11:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9911, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:11:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9911, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:11:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9912, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:11:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9912, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9915, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9915, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9916, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9916, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9919, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9919, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9920, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 10:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9920, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9923, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9923, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9924, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9924, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9927, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9927, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9928, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9928, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9931, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9931, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9932, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9932, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9935, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9935, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9936, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 11:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9936, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9939, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9939, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9940, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9940, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9943, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9943, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9944, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:28:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9944, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9947, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9947, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9948, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 12:45:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9948, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9951, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9951, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9952, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:02:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9952, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9955, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9955, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9956, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:19:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9956, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9959, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9959, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9960, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:36:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9960, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9963, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9963, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9964, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 13:53:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9964, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9967, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9967, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9968, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:10:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9968, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:27:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9971, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:27:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9971, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:27:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9972, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:27:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9972, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:44:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9975, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:44:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9975, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:44:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9976, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 14:44:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9976, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 15:01:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9979, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 15:01:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9979, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 15:01:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9980, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 15:01:00 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_SESSION_EXPIRED, seq_num=9980, level=INFO, username=infinidat, system=vbox-ps-01, description=Session expired (Idle) for user 'infinidat' from IP 172.20.89.195. Client ID: 'infinisdk.v235.0.0.dev3.auto-view.lab.gdc.il.infinidat.com.root.778'
+<190>Jan 18 15:08:37 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9991, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 18 15:08:37 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DEFAULT_COMPRESSION_MODIFIED, seq_num=9991, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=The system default compression was modified to true
+<190>Jan 18 15:08:53 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=10008, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 18 15:08:53 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_DELETED, seq_num=10008, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been deleted
+<190>Jan 18 15:08:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=10009, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 18 15:08:54 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=USER_GROUP_CREATED, seq_num=10009, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Group 'CO-ReadOnly' has been created with role:READ_ONLY. The group links to DN:'CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com'
+<190>Jan 18 15:08:59 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=10014, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog1' deleted
+<190>JAN 18 15:09:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 15:09:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 15:09:08 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>JAN 18 15:09:11 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 1
+<190>JAN 18 15:09:12 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=0, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing of UDP socket 2
+<190>Jan 18 15:09:12 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=DUMMY_TEST_EVENT, seq_num=29418878, level=INFO, username=admin, system=vbox-ps-01, description=Dummy event for notification testing
+<190>Jan 18 15:09:18 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10035, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog1' created
+<190>Jan 18 15:09:20 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10038, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog2' created
+<190>Jan 18 15:09:20 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10038, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'syslog2' created
+<190>Jan 18 15:09:23 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10041, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 18 15:09:23 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10041, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'graylog' created
+<190>Jan 18 15:09:35 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10052, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 18 15:09:35 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_CREATED, seq_num=10052, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'production_email' created
+<190>Jan 18 15:09:38 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=10055, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' deleted
+<190>Jan 18 15:09:38 172.20.68.167 INFINIBOX 30368 172.20.68.167: code=NOTIFICATION_RULE_DELETED, seq_num=10055, level=INFO, username=psus-gitlab-cicd, system=vbox-ps-01, description=Notification rule 'setup_email' deleted
diff --git a/ansible_collections/infinidat/infinibox/scripts/syslog_server.py b/ansible_collections/infinidat/infinibox/scripts/syslog_server.py
new file mode 100755
index 000000000..fe7f6d18d
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/scripts/syslog_server.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+"""
+Tiny Syslog Server in Python.
+
+This is a tiny syslog server that is able to receive UDP based syslog
+entries on a specified port and save them to a file.
+That's it... it does nothing else...
+There are a few configuration parameters. These can be set via env vars.
+Usage: sudo ./syslog_server.py
+"""
+
+import os
+import logging
+import socketserver
+
+# User Configuration variables:
+LOG_FILE = os.environ.get('LOG_FILE', 'syslog.log')
+HOST = os.environ.get('HOST', "0.0.0.0")
+PORT = int(os.environ.get('PORT', 514))
+
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(message)s',
+ datefmt='',
+ filename=LOG_FILE,
+ filemode='a'
+)
+
+
+class SyslogUDPHandler(socketserver.BaseRequestHandler):
+ """ A handler """
+
+ def handle(self):
+ """ Handle data """
+ data = bytes.decode(self.request[0].strip())
+ # socket = self.request[1]
+ print(f"{self.client_address[0]}: {str(data)}")
+ logging.info(str(data))
+
+
+if __name__ == "__main__":
+ try:
+ server = socketserver.UDPServer((HOST, PORT), SyslogUDPHandler)
+ print(f"Starting server on host {HOST}:{PORT} using file {LOG_FILE}...")
+ server.serve_forever(poll_interval=0.5)
+ except PermissionError:
+ print("Permission denied while trying to start the server. Try sudo.")
+ except (IOError, SystemExit): # pylint: disable=try-except-raise
+ raise
+ except KeyboardInterrupt:
+ print("\nShutting down...")
diff --git a/ansible_collections/infinidat/infinibox/tests/config.yml b/ansible_collections/infinidat/infinibox/tests/config.yml
new file mode 100644
index 000000000..9e402bda7
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/config.yml
@@ -0,0 +1,2 @@
+modules:
+ python_requires: ">=3.6"
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_absent.json
new file mode 100644
index 000000000..7355854b6
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_absent.json
@@ -0,0 +1,8 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_present.json
new file mode 100644
index 000000000..2786904a3
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_present.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "certificate_file_name": "/home/stack/workspace/ansible-infinidat-collection/signed-certificate-with-pkey.pem",
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_stat.json
new file mode 100644
index 000000000..ea39ee8b3
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_certificate_stat.json
@@ -0,0 +1,8 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_absent.json
new file mode 100644
index 000000000..cc35770b5
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_cluster",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_present.json
new file mode 100644
index 000000000..73f145629
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_present.json
@@ -0,0 +1,15 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_cluster",
+ "cluster_hosts": [
+ {
+ "host_name": "PSUS_ANSIBLE_host",
+ "host_cluster_state": "present"
+ }
+ ],
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_stat.json
new file mode 100644
index 000000000..c72df1ef0
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_cluster_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_cluster",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_event_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_event_present.json
new file mode 100644
index 000000000..2b7927deb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_event_present.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "description_template": "A test message",
+ "level": "CRITICAL",
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_rename.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_rename.json
new file mode 100644
index 000000000..1bc9855b4
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_rename.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "switch_name": "barfoo",
+ "new_switch_name": "VSAN 100",
+ "state": "rename",
+ "system": "ibox1521",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_stat.json
new file mode 100644
index 000000000..73e8e0b70
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fibre_channel_switch_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "switch_name": "VSAN 100",
+ "state": "stat",
+ "system": "ibox1521",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_absent.json
new file mode 100644
index 000000000..a39970222
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_absent.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_fs",
+ "pool": "PSUS_ANSIBLE_pool",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_present.json
new file mode 100644
index 000000000..b367d76fa
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_present.json
@@ -0,0 +1,13 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_fs_snap",
+ "parent_fs_name": "PSUS_ANSIBLE_fs",
+ "restore_fs_from_snapshot": true,
+ "fs_type": "snapshot",
+ "pool": "PSUS_ANSIBLE_pool",
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_stat.json
new file mode 100644
index 000000000..ae081a687
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_fs_stat.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_fs",
+ "fs_type": "master",
+ "pool": "PSUS_ANSIBLE_pool",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_absent.json
new file mode 100644
index 000000000..ca7ca8133
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_host",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_present.json
new file mode 100644
index 000000000..24e0e970b
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_present.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_host",
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_stat.json
new file mode 100644
index 000000000..ca72c99c8
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_host_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_host",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_absent.json
new file mode 100644
index 000000000..ece0dfd93
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_absent.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "cluster": "PSUS_ANSIBLE_cluster",
+ "volume": "PSUS_ANSIBLE_vol",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_present.json
new file mode 100644
index 000000000..553b847b9
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_present.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "cluster": "PSUS_ANSIBLE_cluster",
+ "volume": "PSUS_ANSIBLE_vol",
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_stat.json
new file mode 100644
index 000000000..229fe67d2
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_map_stat.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "cluster": "PSUS_ANSIBLE_cluster",
+ "volume": "PSUS_ANSIBLE_vol",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_absent.json
new file mode 100644
index 000000000..fbf13b0d0
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_absent.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "object_name": "PSUS_ANSIBLE_pool",
+ "object_type": "pool",
+ "key": "foo",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_present.json
new file mode 100644
index 000000000..9efcff783
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_present.json
@@ -0,0 +1,12 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "object_name": "PSUS_ANSIBLE_pool",
+ "object_type": "pool",
+ "key": "foo",
+ "value": "bar",
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_stat.json
new file mode 100644
index 000000000..ef31dc178
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_metadata_stat.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "object_name": "PSUS_ANSIBLE_pool",
+ "object_type": "pool",
+ "key": "foo",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_absent.json
new file mode 100644
index 000000000..77835fbb1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "Replication",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_present.json
new file mode 100644
index 000000000..f982b6b80
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_present.json
@@ -0,0 +1,26 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "Replication",
+ "service": "RMR_SERVICE",
+ "async_only": false,
+ "mtu": 1500,
+ "rate_limit": 10,
+ "network": "172.20.0.0",
+ "netmask": "16",
+ "default_gateway": "172.20.95.254",
+ "interfaces": [80, 81, 82],
+ "ips": [
+ "172.20.50.111",
+ "172.20.50.70",
+ "172.20.49.243",
+ "172.20.49.241",
+ "172.20.49.239",
+ "172.20.49.237",
+ "172.20.49.235",
+ "172.20.49.233"
+ ],
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_stat.json
new file mode 100644
index 000000000..2d58c36ad
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_network_space_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "Replication",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_absent.json
new file mode 100644
index 000000000..30c824009
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "my-event-rule",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_present.json
new file mode 100644
index 000000000..e51182274
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_present.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "my-event-rule",
+ "target": "syslog1_target",
+ "event_level": [ "info" ],
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_stat.json
new file mode 100644
index 000000000..a5e60a20d
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_rule_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "my-event-rule",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_absent.json
new file mode 100644
index 000000000..2a948c79a
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "my-notification-target",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_present.json
new file mode 100644
index 000000000..0c2012efe
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_present.json
@@ -0,0 +1,15 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "my-notification-target",
+ "host": ["syslog.infinidat.com"],
+ "transport": "UDP",
+ "protocol": "SYSLOG",
+ "facility": "LOCAL7",
+ "visibility": "CUSTOMER",
+ "post_test": false,
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_stat.json
new file mode 100644
index 000000000..67a36bb7d
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_notification_target_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "my-notification-target",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_absent.json
new file mode 100644
index 000000000..e72ef078f
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "OKTA",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_present.json
new file mode 100644
index 000000000..907887cb9
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_present.json
@@ -0,0 +1,15 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "issuer": "http://www.okta.com/exkra32oyyU6KCUCk2p7",
+ "name": "OKTA",
+ "sign_on_url": "https://infinidat.okta.com/app/infinidat_ibox2503_1/exkrwdi7dmXSKdC4l2p7/sso/saml",
+ "signed_assertion": false,
+ "signed_response": false,
+ "signing_certificate": "MIIDojCCAoqgAwIBAgIGAY1bQQXzMA0GCSqGSIb3DQEBCwUAMIGRMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEUMBIGA1UECwwLU1NPUHJvdmlkZXIxEjAQBgNVBAMMCWluZmluaWRhdDEcMBoGCSqGSIb3DQEJARYNaW5mb0Bva3RhLmNvbTAeFw0yNDAxMzAxNjQyMjRaFw0zNDAxMzAxNjQzMjRaMIGRMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEUMBIGA1UECwwLU1NPUHJvdmlkZXIxEjAQBgNVBAMMCWluZmluaWRhdDEcMBoGCSqGSIb3DQEJARYNaW5mb0Bva3RhLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOJmwPjImRqI9scAXMNQbVlthi8Ogx7yGZhfNiqjd+V2ZAHitqTh3L/Bcn+hCiau8eBDFUsumR0P0RHpzulnHKoylAORohDnhuWPZw+xdlflsHXu84jC9cLWGYVHfeEpf1tosLKg/NdbJp34AuYQifythqJAsCwjz+j3cL/Xe2LPqCHgGG1NCPpbD4Gcavh0IVi3C749M3Cy+PLKHh/c/kfrbTdPHP3Y5hROBxwDCdHm5osLZA5V77gZBxU08xeo3IDy2IcqgH2uEI+dgeW9aRZWhG6tHCPYUhuIdpRX1fJhyrXNH6ZOFV0NyBqUxUDv2URRHwgXMLdkf6ujN+Cu2nECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAHJUIfV51MwZ8LfYs0BW5eN6lZCqdzmIjv1fZ6NftIYm7FTVvMo49/lOCRuIYbFvWj/y9kJytu65iM5DUNGOxs+PIaKkt1bXwhvHtG5PKoAcDQj6jhN8C4/hxqz9HAOfLdJrH7RfRxJByc2Z7tAPsTDhPu52lKZrYpF+5Hwyy+Gjoqj1DUQx8u7qCMq2Rpp/iYmuk5/r0CVFyoQS6pVAX0UZqx+Mu221jD5+KkJoaTqU8twb9V9aPE4nCGyrA2u/sjqOt5TPBAGTMKx7yTo8NmxmqEJ+FDUXUeqJLzbeh+NEn6Dy7zJCcyq2lVhqJGALvGrh6CvbSuiOlRv1LDL9N+g==",
+ "enabled": true,
+ "state": "present",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_stat.json
new file mode 100644
index 000000000..b45d99e19
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_sso_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "OKTA",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_absent.json
new file mode 100644
index 000000000..a42977dfb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "user_ldap_group_name": "ldap_group_foo",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_absent.json
new file mode 100644
index 000000000..a42977dfb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "user_ldap_group_name": "ldap_group_foo",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_present.json
new file mode 100644
index 000000000..d1670d790
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_present.json
@@ -0,0 +1,16 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "present",
+ "user_ldap_group_name": "ldap_group_foo",
+ "user_ldap_group_ldap": "PSUS_ANSIBLE_ad",
+ "user_ldap_group_dn": "CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com",
+ "user_ldap_group_role": "pool_admin",
+ "user_ldap_group_pools": "PSUS_ANSIBLE_pool",
+ "user_email": "dohlemacher@infinidat.com",
+ "user_password": "123456",
+ "user_role": "admin",
+ "system": "vbox-ps-01.lab.gdc.il.infinidat.com",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_stat.json
new file mode 100644
index 000000000..5bb951386
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_ldap_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "user_ldap_group_name": "ldap_group_foo",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_login.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_login.json
new file mode 100644
index 000000000..fec7fd9c1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_login.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "login",
+ "user_name": "admin",
+ "user_password": "123456",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_present.json
new file mode 100644
index 000000000..d1670d790
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_present.json
@@ -0,0 +1,16 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "present",
+ "user_ldap_group_name": "ldap_group_foo",
+ "user_ldap_group_ldap": "PSUS_ANSIBLE_ad",
+ "user_ldap_group_dn": "CN=Infinidat,OU=Security Groups,OU=Groups,OU=Corp,DC=infinidat,DC=com",
+ "user_ldap_group_role": "pool_admin",
+ "user_ldap_group_pools": "PSUS_ANSIBLE_pool",
+ "user_email": "dohlemacher@infinidat.com",
+ "user_password": "123456",
+ "user_role": "admin",
+ "system": "vbox-ps-01.lab.gdc.il.infinidat.com",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_absent.json
new file mode 100644
index 000000000..ef47ced31
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "user_name": "foo",
+ "state": "absent",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_login.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_login.json
new file mode 100644
index 000000000..fec7fd9c1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_login.json
@@ -0,0 +1,10 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "login",
+ "user_name": "admin",
+ "user_password": "123456",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_present.json
new file mode 100644
index 000000000..e55dc4b5d
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_present.json
@@ -0,0 +1,13 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "state": "present",
+ "user_name": "foo",
+ "user_email": "dohlemacher@infinidat.com",
+ "user_password": "123456",
+ "user_role": "pool_admin",
+ "user_pool": "PSUS_ANSIBLE_pool",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_stat.json
new file mode 100644
index 000000000..0d3d34ea1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_simple_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "user_name": "foo",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_stat.json
new file mode 100644
index 000000000..5bb951386
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_user_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "user_ldap_group_name": "ldap_group_foo",
+ "state": "stat",
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_absent.json
new file mode 100644
index 000000000..11661d12e
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ad",
+ "state": "absent",
+ "system": "172.20.68.187",
+ "user": "dohlemacher",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_absent.json
new file mode 100644
index 000000000..11661d12e
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_absent.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ad",
+ "state": "absent",
+ "system": "172.20.68.187",
+ "user": "dohlemacher",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_present.json
new file mode 100644
index 000000000..5566ca5cb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_present.json
@@ -0,0 +1,19 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ad",
+ "bind_password": "tuFrAxahuYe4",
+ "bind_username": "conldap",
+ "ad_domain_name": "infinidat.com",
+ "repository_type": "ActiveDirectory",
+ "schema_group_class": "group",
+ "schema_group_memberof_attribute": "memberof",
+ "schema_group_name_attribute": "cn",
+ "schema_groups_basedn": "",
+ "schema_user_class": "user",
+ "schema_username_attribute": "sAMAccountName",
+ "state": "present",
+ "system": "172.20.67.167",
+ "user": "dohlemacher",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_stat.json
new file mode 100644
index 000000000..9b4d0ddaa
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ad_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ad",
+ "state": "stat",
+ "system": "172.20.68.187",
+ "user": "dohlemacher",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_absent.json
new file mode 100644
index 000000000..39c481ef6
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_absent.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "object_type": "fs-snap",
+ "object_name": "PSUS_ANSIBLE_fssnap",
+ "key": "fssnapfoo",
+ "state": "absent",
+ "system": "ibox1521.lab.wt.us.infinidat.com",
+ "user": "psus-gitlab-cicd",
+ "password": "lemonTrainSchoolCrank"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_present.json
new file mode 100644
index 000000000..8040a4c5f
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_present.json
@@ -0,0 +1,20 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ldap",
+ "bind_password": "123456",
+ "bind_username": "adminstrator",
+ "ldap_servers": [],
+ "schema_group_class": "group",
+ "schema_group_memberof_attribute": "memberof",
+ "schema_group_name_attribute": "cn",
+ "schema_groups_basedn": "cn=users,dc=mgmt,dc=local",
+ "schema_user_class": "user",
+ "schema_username_attribute": "sAMAccountName",
+ "schema_users_basedn": "cn=users,dc=mgmt,dc=local",
+ "repository_type": "LDAP",
+ "state": "present",
+ "system": "ibox1521.lab.wt.us.infinidat.com",
+ "user": "psus-gitlab-cicd",
+ "password": "lemonTrainSchoolCrank"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_stat.json
new file mode 100644
index 000000000..6526c1ad1
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_ldap_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "infinidat",
+ "state": "stat",
+ "system": "ibox1521.lab.wt.us.infinidat.com",
+ "user": "psus-gitlab-cicd",
+ "password": "lemonTrainSchoolCrank"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_present.json
new file mode 100644
index 000000000..5566ca5cb
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_present.json
@@ -0,0 +1,19 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ad",
+ "bind_password": "tuFrAxahuYe4",
+ "bind_username": "conldap",
+ "ad_domain_name": "infinidat.com",
+ "repository_type": "ActiveDirectory",
+ "schema_group_class": "group",
+ "schema_group_memberof_attribute": "memberof",
+ "schema_group_name_attribute": "cn",
+ "schema_groups_basedn": "",
+ "schema_user_class": "user",
+ "schema_username_attribute": "sAMAccountName",
+ "state": "present",
+ "system": "172.20.67.167",
+ "user": "dohlemacher",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_stat.json
new file mode 100644
index 000000000..9b4d0ddaa
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_users_repository_stat.json
@@ -0,0 +1,9 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_ad",
+ "state": "stat",
+ "system": "172.20.68.187",
+ "user": "dohlemacher",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_absent.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_absent.json
new file mode 100644
index 000000000..6b2dcf0ba
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_absent.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_vol",
+ "state": "absent",
+ "volume_type": "master",
+
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_present.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_present.json
new file mode 100644
index 000000000..5ac8e2b3f
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_present.json
@@ -0,0 +1,13 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_vol",
+ "pool": "PSUS_ANSIBLE_pool",
+ "size": "1GB",
+ "state": "present",
+ "volume_type": "master",
+
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_stat.json b/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_stat.json
new file mode 100644
index 000000000..d1380be58
--- /dev/null
+++ b/ansible_collections/infinidat/infinibox/tests/hacking/infini_vol_stat.json
@@ -0,0 +1,11 @@
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "PSUS_ANSIBLE_vol",
+ "state": "stat",
+ "volume_type": "master",
+
+ "system": "172.20.68.167",
+ "user": "admin",
+ "password": "123456"
+ }
+}
diff --git a/ansible_collections/infinidat/infinibox/tests/sanity/ignore-2.10.txt b/ansible_collections/infinidat/infinibox/tests/sanity/ignore-2.10.txt
deleted file mode 100644
index e69de29bb..000000000
--- a/ansible_collections/infinidat/infinibox/tests/sanity/ignore-2.10.txt
+++ /dev/null