author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
commit     38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree       356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/ibm
parent     Adding upstream version 7.7.0+dfsg. (diff)
Adding upstream version 9.4.0+dfsg. (tag: upstream/9.4.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/ibm')
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/.github/workflows/main.yml | 14
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/FILES.json | 487
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/MANIFEST.json | 4
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/README.md | 152
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/changelogs/changelog.yaml | 6
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/meta/runtime.yml | 6
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt | 28
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml | 204
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt | 30
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt | 45
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml | 143
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml | 33
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt | 36
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/plugins/modules/ibm_svc_vdisk.py | 421
-rw-r--r--  ansible_collections/ibm/spectrum_virtualize/tests/unit/plugins/modules/test_ibm_svc_vdisk.py | 473
-rw-r--r--  ansible_collections/ibm/storage_virtualize/.github/workflows/ansible-test.yml | 249
-rw-r--r--  ansible_collections/ibm/storage_virtualize/.github/workflows/extra-docs-linting.yml | 34
-rw-r--r--  ansible_collections/ibm/storage_virtualize/.github/workflows/galaxy-importer.yml | 47
-rw-r--r--  ansible_collections/ibm/storage_virtualize/.vscode/extensions.json | 5
-rw-r--r--  ansible_collections/ibm/storage_virtualize/CHANGELOG.rst | 5
-rw-r--r--  ansible_collections/ibm/storage_virtualize/CODE_OF_CONDUCT.md | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/CONTRIBUTING.md | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/FILES.json | 1216
-rw-r--r--  ansible_collections/ibm/storage_virtualize/LICENSE | 674
-rw-r--r--  ansible_collections/ibm/storage_virtualize/MAINTAINERS | 0
-rw-r--r--  ansible_collections/ibm/storage_virtualize/MAINTAINING.md | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/MANIFEST.json | 34
-rw-r--r--  ansible_collections/ibm/storage_virtualize/README.md | 157
-rw-r--r--  ansible_collections/ibm/storage_virtualize/REVIEW_CHECKLIST.md | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/changelogs/changelog.yaml | 60
-rw-r--r--  ansible_collections/ibm/storage_virtualize/changelogs/config.yaml | 29
-rw-r--r--  ansible_collections/ibm/storage_virtualize/changelogs/fragments/.keep | 0
-rw-r--r--  ansible_collections/ibm/storage_virtualize/codecov.yml | 6
-rw-r--r--  ansible_collections/ibm/storage_virtualize/docs/docsite/links.yml | 45
-rw-r--r--  ansible_collections/ibm/storage_virtualize/galaxy-importer.cfg | 2
-rw-r--r--  ansible_collections/ibm/storage_virtualize/meta/execution-environment.yml | 12
-rw-r--r--  ansible_collections/ibm/storage_virtualize/meta/runtime.yml | 2
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md | 88
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml | 97
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml | 71
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars | 52
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars | 42
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars | 4
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml | 119
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml | 34
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml | 24
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml | 74
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml | 47
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt | 28
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml | 203
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt | 30
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml | 27
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml | 79
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt | 36
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml | 33
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml | 147
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt | 28
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt | 45
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml | 143
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml | 33
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt | 36
-rw-r--r--  ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml | 29
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_ssh.py | 129
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_utils.py | 360
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/__init__.py | 0
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_awss3_cloudaccount.py | 496
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_cloud_backups.py | 391
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fc_partnership.py | 419
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fcportsetmember.py | 250
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ip_partnership.py | 637
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_provisioning_policy.py | 343
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_replication_policy.py | 339
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_security.py | 340
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshot.py | 630
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshotpolicy.py | 365
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ssl_certificate.py | 158
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_storage_partition.py | 359
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_syslog_server.py | 462
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_truststore_for_replication.py | 401
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_restore_cloud_backup.py | 304
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_switch_replication_direction.py | 187
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_auth.py | 133
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_complete_initial_setup.py | 142
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_host.py | 766
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_hostcluster.py | 344
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_info.py | 1070
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_initial_setup.py | 599
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_callhome.py | 890
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py | 280
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_cv.py | 401
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_flashcopy.py | 572
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ip.py | 317
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_migration.py | 779
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_mirrored_volume.py | 757
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ownershipgroup.py | 244
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_portset.py | 405
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replication.py | 544
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replicationgroup.py | 379
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_safeguarded_policy.py | 342
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_sra.py | 412
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_user.py | 385
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_usergroup.py | 321
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volume.py | 867
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volumegroup.py | 953
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdisk.py | 440
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdiskgrp.py | 674
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_flashcopy.py | 262
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_replication.py | 294
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_vol_map.py | 374
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svcinfo_command.py | 226
-rw-r--r--  ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svctask_command.py | 211
-rw-r--r--  ansible_collections/ibm/storage_virtualize/requirements.txt | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/requirements.yml | 4
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/config.yml | 2
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/integration/targets/.gitkeep | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/.gitkeep | 3
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_ssh.py | 102
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_utils.py | 162
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py | 594
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py | 445
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py | 422
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py | 215
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py | 797
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py | 370
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py | 348
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_security.py | 109
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py | 793
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py | 332
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py | 94
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_storage_partition.py | 392
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_syslog_server.py | 396
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py | 322
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py | 204
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py | 144
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_auth.py | 115
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py | 133
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_host.py | 682
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_hostcluster.py | 343
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_info.py | 304
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_initial_setup.py | 639
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py | 847
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py | 401
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_cv.py | 808
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py | 837
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ip.py | 742
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_migration.py | 1613
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py | 725
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py | 320
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_portset.py | 415
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replication.py | 856
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py | 441
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py | 324
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_sra.py | 401
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_user.py | 521
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py | 448
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volume.py | 1731
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py | 1638
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdisk.py | 439
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py | 894
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py | 482
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py | 388
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_vol_map.py | 360
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svcinfo_command.py | 190
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svctask_command.py | 164
-rw-r--r--  ansible_collections/ibm/storage_virtualize/tests/unit/requirements.txt | 2
165 files changed, 50348 insertions(+), 1284 deletions(-)
diff --git a/ansible_collections/ibm/spectrum_virtualize/.github/workflows/main.yml b/ansible_collections/ibm/spectrum_virtualize/.github/workflows/main.yml
index bad1d8dd8..49cfbcb9e 100644
--- a/ansible_collections/ibm/spectrum_virtualize/.github/workflows/main.yml
+++ b/ansible_collections/ibm/spectrum_virtualize/.github/workflows/main.yml
@@ -21,18 +21,11 @@ jobs:
- stable-2.13
- stable-2.14
python:
- - '2.7'
- '3.8'
- '3.9'
exclude:
- ansible: stable-2.9
python: '3.9'
- - ansible: stable-2.12
- python: '2.7'
- - ansible: stable-2.13
- python: '2.7'
- - ansible: stable-2.14
- python: '2.7'
- ansible: stable-2.14
python: '3.8'
steps:
@@ -73,18 +66,11 @@ jobs:
- stable-2.13
- stable-2.14
python:
- - '2.7'
- '3.8'
- '3.9'
exclude:
- ansible: stable-2.9
python: '3.9'
- - ansible: stable-2.12
- python: '2.7'
- - ansible: stable-2.13
- python: '2.7'
- - ansible: stable-2.14
- python: '2.7'
- ansible: stable-2.14
python: '3.8'
steps:
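For reference, a minimal sketch of the test matrix these two hunks produce, reconstructed from the context lines above (an assumption; the rest of the job definition is not shown here). With Python 2.7 removed from the matrix entirely, the per-release excludes for 2.7 become redundant and are dropped as well:

```yaml
# Hypothetical post-patch matrix, reconstructed from the hunk context.
strategy:
  matrix:
    ansible:
      - stable-2.9
      - stable-2.12
      - stable-2.13
      - stable-2.14
    python:
      - '3.8'
      - '3.9'
    exclude:
      # Combinations that remain excluded after the change
      - ansible: stable-2.9
        python: '3.9'
      - ansible: stable-2.14
        python: '3.8'
```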
diff --git a/ansible_collections/ibm/spectrum_virtualize/FILES.json b/ansible_collections/ibm/spectrum_virtualize/FILES.json
index 0043a5e17..20b03c30d 100644
--- a/ansible_collections/ibm/spectrum_virtualize/FILES.json
+++ b/ansible_collections/ibm/spectrum_virtualize/FILES.json
@@ -25,7 +25,7 @@
"name": ".github/workflows/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea85e89582ae7e5d03ea8f9ecfaf9236285efee981ce27db3241d161b7ba1152",
+ "chksum_sha256": "31af35810fde2bd8b1c68daf36a6d138ddad22fd0c1330ab8d9265a19849bf3f",
"format": 1
},
{
@@ -53,7 +53,7 @@
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9d298708b3e097a7f51a5125351efa0ae66d882f4db3e607c271b32ea68c0664",
+ "chksum_sha256": "27f9db0a097225f1fdb7e228f483cd1b743d90b8f869544276afa1e6472a42c6",
"format": 1
},
{
@@ -71,6 +71,13 @@
"format": 1
},
{
+ "name": "galaxy-importer.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7501d13dd591dda472fcdb8b9f44677a50ef86620f7756ba9c1196a41b2cd33c",
+ "format": 1
+ },
+ {
"name": "meta",
"ftype": "dir",
"chksum_type": null,
@@ -78,17 +85,17 @@
"format": 1
},
{
- "name": "meta/runtime.yml",
+ "name": "meta/execution-environment.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f5614008729520e63806d0dbb04f177ad75b25ba1aead1bb32585b45b1199d69",
+ "chksum_sha256": "82854d0088f5a33247495393b516cea47b8c522131c0af4b7be755d75107af3d",
"format": 1
},
{
- "name": "meta/execution-environment.yml",
+ "name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "82854d0088f5a33247495393b516cea47b8c522131c0af4b7be755d75107af3d",
+ "chksum_sha256": "c03f11d23823bc02cb96a7d367906c9dab8fa9aeba23be4ba0263018359130e5",
"format": 1
},
{
@@ -99,24 +106,24 @@
"format": 1
},
{
- "name": "playbooks/generic_ansible_sample.yaml",
+ "name": "playbooks/create_GMCV_in_CG.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4d804261c8c28d3a39afe649c255f983cdfd92b0b09eef320e046c10c60c7d84",
+ "chksum_sha256": "69b5d3f520619991ac8e75d5140bd6a5a46720e114e8f7afff2c41d5c90c11be",
"format": 1
},
{
- "name": "playbooks/generic_info.yml",
+ "name": "playbooks/generic_ansible_sample.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0e6347a4ddc84cc8daf6d73f6c9a26cd10b1d9a12d6ef971a1e888a448303b27",
+ "chksum_sha256": "4d804261c8c28d3a39afe649c255f983cdfd92b0b09eef320e046c10c60c7d84",
"format": 1
},
{
- "name": "playbooks/create_GMCV_in_CG.yml",
+ "name": "playbooks/generic_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "69b5d3f520619991ac8e75d5140bd6a5a46720e114e8f7afff2c41d5c90c11be",
+ "chksum_sha256": "0e6347a4ddc84cc8daf6d73f6c9a26cd10b1d9a12d6ef971a1e888a448303b27",
"format": 1
},
{
@@ -141,13 +148,6 @@
"format": 1
},
{
- "name": "playbooks/volumegrp_create.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d58341d383558e50798b3c140087079d254e10614e22d4165b600470565c4109",
- "format": 1
- },
- {
"name": "playbooks/volume_migration",
"ftype": "dir",
"chksum_type": null,
@@ -183,6 +183,76 @@
"format": 1
},
{
+ "name": "playbooks/volumegrp_create.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d58341d383558e50798b3c140087079d254e10614e22d4165b600470565c4109",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef716cf64ce518b2db70913c828ec89468325caff01a16b88e94968e26b0c6f5",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/Readme.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35522161c5ed3fbe0b9b8e2f0957b9941149447e6dc9b5e47a5e0a782c53a304",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b658de901f77820a15bc5bc2251e0eb7a912b05db1ace32265211cb1c2e44fa5",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12a1297efc0e9e416d7c55515103dc7e6d64df86009943f8a97deed1386ff0d9",
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c638ad2658cbe7240e8017a455f6fa8c6b901d51559c50daee2f4a589456152",
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0526ba82bf45065fab533764ed1e65e436abb87d9f23d67019495fa80578d9f2",
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7af6fed971862af9c6f91584d4022378727a6625cc41eab16e10e2fda462adef",
+ "format": 1
+ },
+ {
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
@@ -232,185 +302,185 @@
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_auth.py",
+ "name": "plugins/modules/ibm_sv_manage_awss3_cloudaccount.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5b81f6b4db66443e632239ff4af910b95c92e571a40994874ba423113c805a9f",
+ "chksum_sha256": "eeb3aae88a6882808d30a34043b58cc31a0ca621049b4a45c4daa99b5ab57c87",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_complete_initial_setup.py",
+ "name": "plugins/modules/ibm_sv_manage_cloud_backups.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1905de93c10ca651eedd79c81b442c0bf5fd33e9c484713e2cf6eb33d3d71785",
+ "chksum_sha256": "9149ea6f4eda997e53c5b978bd7ced8a90ada1c9961a43a171c53a95163a087f",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_callhome.py",
+ "name": "plugins/modules/ibm_sv_manage_fc_partnership.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2bfa05adc0609f15011fbe1c1b9c8c45852c783ee560a88049b55dd15299a186",
+ "chksum_sha256": "82d680d91a0a428da4490fa7908bca517b947a67054250412d0547a8eac1444f",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_cv.py",
+ "name": "plugins/modules/ibm_sv_manage_fcportsetmember.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a9eb49551a8d77107fb11a3ea60daed8cb4630cd0fe0dd4f8f9a1a66d03cbd7d",
+ "chksum_sha256": "f727d3e62d8b4e2b72480e0abf2c1209ab5225f95671218da34284b5f63e4e54",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_flashcopy.py",
+ "name": "plugins/modules/ibm_sv_manage_ip_partnership.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e716ba71c8b06026f19d5085bf46c1fdff89bda8e33c44048ff86ceee2f624a6",
+ "chksum_sha256": "cccfb97b0336b1f7c259032a85d08e65c1b78ac73ae6e2dc4bb8fd6b19c08b1d",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_ip.py",
+ "name": "plugins/modules/ibm_sv_manage_provisioning_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9a1564d37a960f0864d8fa977d6871aa95096f0c7423d691c8fd3d8b93315fbe",
+ "chksum_sha256": "88b6c6027dc71e59e25c517a95bfcd8dc6514c9857e990f5b697c5a35525193c",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_mirrored_volume.py",
+ "name": "plugins/modules/ibm_sv_manage_replication_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0d3dc16d5803c5227143c0aa96abcf5eb13bcd5309b51e25f312082e4c39dafe",
+ "chksum_sha256": "50989a73d4b4d0cc9e3f1777cc3ae09911033f6785babedfaa6439b98151720f",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_replication.py",
+ "name": "plugins/modules/ibm_sv_manage_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "feebabbc03efd86c82f55de6e1c976083764752cd55a8cc8944c02d2d5e6a7e6",
+ "chksum_sha256": "ec1dfb1390100778abf18a0c2488411ab29eee06f24b05b1f76d9fe5cf8a52dd",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_replicationgroup.py",
+ "name": "plugins/modules/ibm_sv_manage_snapshotpolicy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "264fbe5d6ad76ea098bdd694a291b3e842c3363cf1212030d694b99a54190720",
+ "chksum_sha256": "e61d04c1ca744f205f97a1d101a8fc13b9cc12f66f4cb2277f53bb77fa9d5505",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_user.py",
+ "name": "plugins/modules/ibm_sv_manage_ssl_certificate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8a748c40e02a044ae36034f6789c7f62d44e8cc298d926e06de7ec5e6b566ab3",
+ "chksum_sha256": "8fcba087976d6c9cc98b12d77c3930d7306fb59b026776b77e8dae93ec3c2f81",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_usergroup.py",
+ "name": "plugins/modules/ibm_sv_manage_truststore_for_replication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "795c8b2d00d0306edc5593446201107356a3f69db938cd2862eb409c35c8605c",
+ "chksum_sha256": "d669db7064795663a2345e389bafe4edd3e260e753e012ce27b7d91d240168a2",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_mdisk.py",
+ "name": "plugins/modules/ibm_sv_restore_cloud_backup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c2903182074095bad71c2992576e5227661668c4ed4422be004cfc1e6b29abb",
+ "chksum_sha256": "178341bf08b2da2f298e294965f689c741e371e0062f169f6a5d4fdebc62705d",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_start_stop_flashcopy.py",
+ "name": "plugins/modules/ibm_sv_switch_replication_direction.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8542e5ec1020a807e1aecd6ecb44f14e60bf4641dddb5224c025411f7e5f47e",
+ "chksum_sha256": "3ae68fff59de2452e73cacff827171a1a7550a9c71fc600f751e728df9c5cfbe",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_start_stop_replication.py",
+ "name": "plugins/modules/ibm_svc_auth.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "26a75e103b07d72e9b8f6a46cd25e10cee53cf4098f31413062d0da559e12cda",
+ "chksum_sha256": "5b81f6b4db66443e632239ff4af910b95c92e571a40994874ba423113c805a9f",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_vol_map.py",
+ "name": "plugins/modules/ibm_svc_complete_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "77a6cb11357cf1f244c961094ea9d2245d0a9f145b9dd944ade609f7b6a4832b",
+ "chksum_sha256": "1905de93c10ca651eedd79c81b442c0bf5fd33e9c484713e2cf6eb33d3d71785",
"format": 1
},
{
- "name": "plugins/modules/ibm_svctask_command.py",
+ "name": "plugins/modules/ibm_svc_host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b24e5881e0e417e3723ce4593fb1d8cc60b1be546c12c358f129f742d0418c2d",
+ "chksum_sha256": "307ff594d96b4d2f6cc842f631293b024239acc357c7fd5c9bcbb5981cb6fffc",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_ip_partnership.py",
+ "name": "plugins/modules/ibm_svc_hostcluster.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cccfb97b0336b1f7c259032a85d08e65c1b78ac73ae6e2dc4bb8fd6b19c08b1d",
+ "chksum_sha256": "94362873432e23d67da21c47507da5c1470596726776fde415506d8224f48664",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_vdisk.py",
+ "name": "plugins/modules/ibm_svc_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8f55f1ef8fe1ff8b6b178ddb8cb1688b940bbb7108951f2b313f98e593a9ad59",
+ "chksum_sha256": "affb7038cd0857aa4cef63f64bb4ff3e676954dd6b6cea7a3f693f15072a5720",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_provisioning_policy.py",
+ "name": "plugins/modules/ibm_svc_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "88b6c6027dc71e59e25c517a95bfcd8dc6514c9857e990f5b697c5a35525193c",
+ "chksum_sha256": "0326cc4bbcf4594c81ef5797878e5e7bcb500ee4084a423a1e23dcdce545fd39",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_replication_policy.py",
+ "name": "plugins/modules/ibm_svc_manage_callhome.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "50989a73d4b4d0cc9e3f1777cc3ae09911033f6785babedfaa6439b98151720f",
+ "chksum_sha256": "2bfa05adc0609f15011fbe1c1b9c8c45852c783ee560a88049b55dd15299a186",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_snapshot.py",
+ "name": "plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ec1dfb1390100778abf18a0c2488411ab29eee06f24b05b1f76d9fe5cf8a52dd",
+ "chksum_sha256": "5ca9a6fdc914d98e8a309848d0bdd5757bdb074be8d8d6b2482fdc36d18cc9b1",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_snapshotpolicy.py",
+ "name": "plugins/modules/ibm_svc_manage_cv.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e61d04c1ca744f205f97a1d101a8fc13b9cc12f66f4cb2277f53bb77fa9d5505",
+ "chksum_sha256": "a9eb49551a8d77107fb11a3ea60daed8cb4630cd0fe0dd4f8f9a1a66d03cbd7d",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_switch_replication_direction.py",
+ "name": "plugins/modules/ibm_svc_manage_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3ae68fff59de2452e73cacff827171a1a7550a9c71fc600f751e728df9c5cfbe",
+ "chksum_sha256": "e716ba71c8b06026f19d5085bf46c1fdff89bda8e33c44048ff86ceee2f624a6",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_hostcluster.py",
+ "name": "plugins/modules/ibm_svc_manage_ip.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "94362873432e23d67da21c47507da5c1470596726776fde415506d8224f48664",
+ "chksum_sha256": "9a1564d37a960f0864d8fa977d6871aa95096f0c7423d691c8fd3d8b93315fbe",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_initial_setup.py",
+ "name": "plugins/modules/ibm_svc_manage_migration.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0326cc4bbcf4594c81ef5797878e5e7bcb500ee4084a423a1e23dcdce545fd39",
+ "chksum_sha256": "98e41799506a60feb3be0c1ff92cfc9a53390d889c406e2dc40a61885587a7f8",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py",
+ "name": "plugins/modules/ibm_svc_manage_mirrored_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5ca9a6fdc914d98e8a309848d0bdd5757bdb074be8d8d6b2482fdc36d18cc9b1",
+ "chksum_sha256": "0d3dc16d5803c5227143c0aa96abcf5eb13bcd5309b51e25f312082e4c39dafe",
"format": 1
},
{
@@ -421,45 +491,52 @@
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_safeguarded_policy.py",
+ "name": "plugins/modules/ibm_svc_manage_portset.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "75ce0c3ad4aa96d953dcda3c54e5b8bf11a6128608616b2e8d1529d48136f4ca",
+ "chksum_sha256": "eeb6408391d9c496c9700243c7daaae4debfd3bde56d54c412355ab13e08c51a",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_sra.py",
+ "name": "plugins/modules/ibm_svc_manage_replication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e298e0a4f5e4e8c47113ff8692f4dcdeabe880264c129c3cb07fdc96c7095ac6",
+ "chksum_sha256": "feebabbc03efd86c82f55de6e1c976083764752cd55a8cc8944c02d2d5e6a7e6",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_volumegroup.py",
+ "name": "plugins/modules/ibm_svc_manage_replicationgroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6944f827a63ae67d6c856aadc8348d06b1bbd362e9c1ec7b95a97938eb8456bb",
+ "chksum_sha256": "264fbe5d6ad76ea098bdd694a291b3e842c3363cf1212030d694b99a54190720",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_ssl_certificate.py",
+ "name": "plugins/modules/ibm_svc_manage_safeguarded_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8fcba087976d6c9cc98b12d77c3930d7306fb59b026776b77e8dae93ec3c2f81",
+ "chksum_sha256": "75ce0c3ad4aa96d953dcda3c54e5b8bf11a6128608616b2e8d1529d48136f4ca",
"format": 1
},
{
- "name": "plugins/modules/ibm_svcinfo_command.py",
+ "name": "plugins/modules/ibm_svc_manage_sra.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5a85d5e474c57756f167672b77f26a4cde807c91cc7dbd343f0f7286d17fb410",
+ "chksum_sha256": "e298e0a4f5e4e8c47113ff8692f4dcdeabe880264c129c3cb07fdc96c7095ac6",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_migration.py",
+ "name": "plugins/modules/ibm_svc_manage_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "98e41799506a60feb3be0c1ff92cfc9a53390d889c406e2dc40a61885587a7f8",
+ "chksum_sha256": "8a748c40e02a044ae36034f6789c7f62d44e8cc298d926e06de7ec5e6b566ab3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_usergroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "795c8b2d00d0306edc5593446201107356a3f69db938cd2862eb409c35c8605c",
"format": 1
},
{
@@ -470,73 +547,73 @@
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_awss3_cloudaccount.py",
+ "name": "plugins/modules/ibm_svc_manage_volumegroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eeb3aae88a6882808d30a34043b58cc31a0ca621049b4a45c4daa99b5ab57c87",
+ "chksum_sha256": "6944f827a63ae67d6c856aadc8348d06b1bbd362e9c1ec7b95a97938eb8456bb",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_cloud_backups.py",
+ "name": "plugins/modules/ibm_svc_mdisk.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9149ea6f4eda997e53c5b978bd7ced8a90ada1c9961a43a171c53a95163a087f",
+ "chksum_sha256": "3c2903182074095bad71c2992576e5227661668c4ed4422be004cfc1e6b29abb",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_fc_partnership.py",
+ "name": "plugins/modules/ibm_svc_mdiskgrp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "82d680d91a0a428da4490fa7908bca517b947a67054250412d0547a8eac1444f",
+ "chksum_sha256": "a1d03397771fcdd95163b8fea340ba3d2d8e2b7fce588e53224c6f753899d0dd",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_fcportsetmember.py",
+ "name": "plugins/modules/ibm_svc_start_stop_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f727d3e62d8b4e2b72480e0abf2c1209ab5225f95671218da34284b5f63e4e54",
+ "chksum_sha256": "a8542e5ec1020a807e1aecd6ecb44f14e60bf4641dddb5224c025411f7e5f47e",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_manage_truststore_for_replication.py",
+ "name": "plugins/modules/ibm_svc_start_stop_replication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d669db7064795663a2345e389bafe4edd3e260e753e012ce27b7d91d240168a2",
+ "chksum_sha256": "26a75e103b07d72e9b8f6a46cd25e10cee53cf4098f31413062d0da559e12cda",
"format": 1
},
{
- "name": "plugins/modules/ibm_sv_restore_cloud_backup.py",
+ "name": "plugins/modules/ibm_svc_vol_map.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "178341bf08b2da2f298e294965f689c741e371e0062f169f6a5d4fdebc62705d",
+ "chksum_sha256": "77a6cb11357cf1f244c961094ea9d2245d0a9f145b9dd944ade609f7b6a4832b",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_host.py",
+ "name": "plugins/modules/ibm_svcinfo_command.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "307ff594d96b4d2f6cc842f631293b024239acc357c7fd5c9bcbb5981cb6fffc",
+ "chksum_sha256": "5a85d5e474c57756f167672b77f26a4cde807c91cc7dbd343f0f7286d17fb410",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_info.py",
+ "name": "plugins/modules/ibm_svctask_command.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "affb7038cd0857aa4cef63f64bb4ff3e676954dd6b6cea7a3f693f15072a5720",
+ "chksum_sha256": "b24e5881e0e417e3723ce4593fb1d8cc60b1be546c12c358f129f742d0418c2d",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_manage_portset.py",
+ "name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eeb6408391d9c496c9700243c7daaae4debfd3bde56d54c412355ab13e08c51a",
+ "chksum_sha256": "51b6a1455380a67307dd3b9d1ecfe69a26ce9cc5e6ab6fb2d29dc91c63f8fd8f",
"format": 1
},
{
- "name": "plugins/modules/ibm_svc_mdiskgrp.py",
+ "name": "requirements.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a1d03397771fcdd95163b8fea340ba3d2d8e2b7fce588e53224c6f753899d0dd",
+ "chksum_sha256": "134abe94ea511975eb2cc46a0150599afc81fb12e0a09ddbcf97d2ec39cb7357",
"format": 1
},
{
@@ -568,14 +645,14 @@
"format": 1
},
{
- "name": "tests/sanity/ignore-2.9.txt",
+ "name": "tests/sanity/ignore-2.13.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "daff51a57f16f15f70a99b4356bc389ca3543c680dddf58f2721b4f652698ee0",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.13.txt",
+ "name": "tests/sanity/ignore-2.9.txt",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "daff51a57f16f15f70a99b4356bc389ca3543c680dddf58f2721b4f652698ee0",
@@ -624,262 +701,241 @@
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_auth.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcbe779c8945b5dd2ae56441c830dffe0d306c644d60955d2d95d88fc89074e2",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "187bae51452a2d69c08e229e9fe8d4742b7cdc79fcbedf0c9ded20a0e19849bb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/test_ibm_svc_hostcluster.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b285af061c2e81f3baff642f56566da3b69fb9057f91c739a1c48270a6f9aee",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/test_ibm_svc_initial_setup.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a6d836087b171b39982b4e0abeb28afeadc0c19e0befa119fdd7dcc4ac471490",
+ "chksum_sha256": "fa0fbd1abb1da6a478d009c0281ec19bd91da0b8739a17890f7dfe10a84cb54e",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2c863e5cf376bb135a1c3b066e7972f81915f505651a8a6aaddd239fd10d62c1",
+ "chksum_sha256": "48156ff487870ac46f4cb403957537ab300bafa121f5e61766075a649b2a2519",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d69eab89afd086c37d637bed17d9cf181e8d8085510d0415497a1c7d4d45819e",
+ "chksum_sha256": "859bb227ab162b8fd00818d5a0cb9ae6a8b0b8d3a8765ad942c6ce3fcdaabe7f",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_cv.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1dbb60f628fffdb19cedd915fc19041f58ae4d56ce236c45e0de9ae09c8da511",
+ "chksum_sha256": "76de84375f4746ad416ea434236323857d5d48e512fef89a9cc2ce7033f5830f",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cab4c0cb49a09fd9b06c3e38e54ad6caa32efe1461c571ace2756281e59edee4",
+ "chksum_sha256": "8bc51c38ab2c6a049d00112c45de31e3c1a19d02b85f0d32a7c578d94a158499",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_ip.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a3f7171af40820bf4b6f8aed69d3e09dd8ebac39e242bd5eaaac91489da51c91",
+ "chksum_sha256": "384f512fac49f366678509d111913aa98a7c16079e59ace7edd0f4b5cfcfd1ce",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8937fb4013f7ecbf6b3773b2e852fd87d3b4d68cc20c5753f0e37e936a4e5c0b",
+ "chksum_sha256": "02c91a06f4490861a9e58425802ab9a5c9c04a9f752541daf4ec47e8a47027ce",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1616f8f6f0bcb35513e177f00c155554b30909f78464453370b215e5245b535c",
+ "chksum_sha256": "cb78fa2f32ca6802c7ecbd246d0b1ac681c28e7a88f76435d2e78fe3a36dd107",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_replication.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b0343cf2f92f4cca23471c46372bccac602db38eb41bec56890e0ccea4e940b7",
+ "chksum_sha256": "047cb1813da22efb4724c565ec07224a9e7c7768743ab324409e74004f1dd83f",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "972bbd8d1cfda500a1651d831a35d16d6fd6e8caddf4d80480371aa17528bf6b",
+ "chksum_sha256": "176158c315c90c4eac0dca056a4058b0bb6b4142d43602503ec213d35244099b",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6c84c0dc60dab3e2aed8c51e9140c4f45e5844608b06c563d9c93bbe3d3f0384",
+ "chksum_sha256": "e2957f3fbaaf1049361f4a2027d565adf7e9a15f73d1af139a81aed30a67a4fb",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_sra.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "78774c98fbcf2a543fb6d49b82dea61c6d4a8105d6d2a8bc949f6980840e9037",
+ "chksum_sha256": "8360e33c724235c589c057ec45b8b415a2e8762f32638c3bc4ec40f2c243d093",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_user.py",
+ "name": "tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bc0a63fa0a7498b8320927737988824a187052032dbba16eedba25092c9eb6f5",
+ "chksum_sha256": "cc1dcca78d7884cd2dee677782b62023668d12124bad9fda1d7a5f1f93a0aa15",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_auth.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "afff9022506bf5c5a0f38b3a45cae4e326635e960808294532d4276b45e5042e",
+ "chksum_sha256": "dcbe779c8945b5dd2ae56441c830dffe0d306c644d60955d2d95d88fc89074e2",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_mdisk.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "51717d02bef44db970d6073ee870592a0282569893351d6946162e3f4e866a4e",
+ "chksum_sha256": "187bae51452a2d69c08e229e9fe8d4742b7cdc79fcbedf0c9ded20a0e19849bb",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7c1d7d6b567ea9a7b59755464df59e951a2173b200cbd91a313986093780c2f5",
+ "chksum_sha256": "751253101a547d501ae439c9f926d936e3704f0013812bb2ee8b3456d1901a4a",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_hostcluster.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5e76618cdc63825c97bd3d3ff0b7631ff7516c93540094aa3137007a5a5462ec",
+ "chksum_sha256": "2b285af061c2e81f3baff642f56566da3b69fb9057f91c739a1c48270a6f9aee",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_vdisk.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "69ca79500c2039bf8ff858aef6376d726183cd619fa304e90cf600b5c2b70255",
+ "chksum_sha256": "f58fd3073d1672883ce7faedf52920965ea615dbbca1e140c3025addc5e5745a",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_vol_map.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_initial_setup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c27d92040a95ed23d9e5d860b880f7bcf343da767bf2932aafe83f33fb5b3287",
+ "chksum_sha256": "a6d836087b171b39982b4e0abeb28afeadc0c19e0befa119fdd7dcc4ac471490",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svcinfo_command.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0f7d9be0c049635ad29ea2f7676dab82069fc8747dab75488f005887029d6634",
+ "chksum_sha256": "2c863e5cf376bb135a1c3b066e7972f81915f505651a8a6aaddd239fd10d62c1",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svctask_command.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f48e7a9815bb15cdfe937f946041055356318feff170014b3ad2f763484078f",
+ "chksum_sha256": "d69eab89afd086c37d637bed17d9cf181e8d8085510d0415497a1c7d4d45819e",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_cv.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8bc51c38ab2c6a049d00112c45de31e3c1a19d02b85f0d32a7c578d94a158499",
+ "chksum_sha256": "1dbb60f628fffdb19cedd915fc19041f58ae4d56ce236c45e0de9ae09c8da511",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "047cb1813da22efb4724c565ec07224a9e7c7768743ab324409e74004f1dd83f",
+ "chksum_sha256": "cab4c0cb49a09fd9b06c3e38e54ad6caa32efe1461c571ace2756281e59edee4",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_ip.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "384f512fac49f366678509d111913aa98a7c16079e59ace7edd0f4b5cfcfd1ce",
+ "chksum_sha256": "a3f7171af40820bf4b6f8aed69d3e09dd8ebac39e242bd5eaaac91489da51c91",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_migration.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "02c91a06f4490861a9e58425802ab9a5c9c04a9f752541daf4ec47e8a47027ce",
+ "chksum_sha256": "6f89529af5da44531b020271e058521d7252287ee713f76252ba03f98e415d34",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cb78fa2f32ca6802c7ecbd246d0b1ac681c28e7a88f76435d2e78fe3a36dd107",
+ "chksum_sha256": "8937fb4013f7ecbf6b3773b2e852fd87d3b4d68cc20c5753f0e37e936a4e5c0b",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2957f3fbaaf1049361f4a2027d565adf7e9a15f73d1af139a81aed30a67a4fb",
+ "chksum_sha256": "1616f8f6f0bcb35513e177f00c155554b30909f78464453370b215e5245b535c",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_portset.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cc1dcca78d7884cd2dee677782b62023668d12124bad9fda1d7a5f1f93a0aa15",
+ "chksum_sha256": "a24941a59674fd82acf4de65aacd0c185488324276f6e4a915d3209ca5042ac7",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_replication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "530795c8b22273d3d95bf67b671182eb86b34a1fc2639b93261ba1ca30c7597c",
+ "chksum_sha256": "b0343cf2f92f4cca23471c46372bccac602db38eb41bec56890e0ccea4e940b7",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "176158c315c90c4eac0dca056a4058b0bb6b4142d43602503ec213d35244099b",
+ "chksum_sha256": "972bbd8d1cfda500a1651d831a35d16d6fd6e8caddf4d80480371aa17528bf6b",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fa0fbd1abb1da6a478d009c0281ec19bd91da0b8739a17890f7dfe10a84cb54e",
+ "chksum_sha256": "6c84c0dc60dab3e2aed8c51e9140c4f45e5844608b06c563d9c93bbe3d3f0384",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_sra.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "48156ff487870ac46f4cb403957537ab300bafa121f5e61766075a649b2a2519",
+ "chksum_sha256": "78774c98fbcf2a543fb6d49b82dea61c6d4a8105d6d2a8bc949f6980840e9037",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8360e33c724235c589c057ec45b8b415a2e8762f32638c3bc4ec40f2c243d093",
+ "chksum_sha256": "bc0a63fa0a7498b8320927737988824a187052032dbba16eedba25092c9eb6f5",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_migration.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6f89529af5da44531b020271e058521d7252287ee713f76252ba03f98e415d34",
+ "chksum_sha256": "afff9022506bf5c5a0f38b3a45cae4e326635e960808294532d4276b45e5042e",
"format": 1
},
{
@@ -890,73 +946,66 @@
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "859bb227ab162b8fd00818d5a0cb9ae6a8b0b8d3a8765ad942c6ce3fcdaabe7f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "76de84375f4746ad416ea434236323857d5d48e512fef89a9cc2ce7033f5830f",
+ "chksum_sha256": "530795c8b22273d3d95bf67b671182eb86b34a1fc2639b93261ba1ca30c7597c",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_host.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_mdisk.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "751253101a547d501ae439c9f926d936e3704f0013812bb2ee8b3456d1901a4a",
+ "chksum_sha256": "51717d02bef44db970d6073ee870592a0282569893351d6946162e3f4e866a4e",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_info.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f58fd3073d1672883ce7faedf52920965ea615dbbca1e140c3025addc5e5745a",
+ "chksum_sha256": "6cc246158f36c1baf31e45ef18ca4baee81cfef022fb0b612775354342af211b",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_manage_portset.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a24941a59674fd82acf4de65aacd0c185488324276f6e4a915d3209ca5042ac7",
+ "chksum_sha256": "7c1d7d6b567ea9a7b59755464df59e951a2173b200cbd91a313986093780c2f5",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6cc246158f36c1baf31e45ef18ca4baee81cfef022fb0b612775354342af211b",
+ "chksum_sha256": "5e76618cdc63825c97bd3d3ff0b7631ff7516c93540094aa3137007a5a5462ec",
"format": 1
},
{
- "name": "requirements.yml",
+ "name": "tests/unit/plugins/modules/test_ibm_svc_vol_map.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "134abe94ea511975eb2cc46a0150599afc81fb12e0a09ddbcf97d2ec39cb7357",
+ "chksum_sha256": "c27d92040a95ed23d9e5d860b880f7bcf343da767bf2932aafe83f33fb5b3287",
"format": 1
},
{
- "name": "README.md",
+ "name": "tests/unit/plugins/modules/test_ibm_svcinfo_command.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "60912537cde3dafb92d36179169be90d9400ee2c373bcaa9cf2eec6f57f6f688",
+ "chksum_sha256": "0f7d9be0c049635ad29ea2f7676dab82069fc8747dab75488f005887029d6634",
"format": 1
},
{
- "name": "galaxy-importer.cfg",
+ "name": "tests/unit/plugins/modules/test_ibm_svctask_command.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7501d13dd591dda472fcdb8b9f44677a50ef86620f7756ba9c1196a41b2cd33c",
+ "chksum_sha256": "1f48e7a9815bb15cdfe937f946041055356318feff170014b3ad2f763484078f",
"format": 1
},
{
- "name": "requirements.txt",
+ "name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "51b6a1455380a67307dd3b9d1ecfe69a26ce9cc5e6ab6fb2d29dc91c63f8fd8f",
+ "chksum_sha256": "77fd2c36c9c826ab2169a44831f18cdff7c5f78e03c8f2695d6006ca9cad4753",
"format": 1
}
],
diff --git a/ansible_collections/ibm/spectrum_virtualize/MANIFEST.json b/ansible_collections/ibm/spectrum_virtualize/MANIFEST.json
index 3676a4f3f..e3d70d599 100644
--- a/ansible_collections/ibm/spectrum_virtualize/MANIFEST.json
+++ b/ansible_collections/ibm/spectrum_virtualize/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "ibm",
"name": "spectrum_virtualize",
- "version": "1.12.0",
+ "version": "2.0.0",
"authors": [
"Shilpi Jain <shilpi.jain1@ibm.com>",
"Sanjaikumaar M <sanjaikumaar.m@ibm.com>",
@@ -27,7 +27,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "30f8c616553f832f5918e3e029c278e6c812a175a62b3463df5838e0deef3da3",
+ "chksum_sha256": "f989583c1934a9683d7d0dc7bd1aecd6d8af94240e22e36c8c526aef4499c961",
"format": 1
},
"format": 1
diff --git a/ansible_collections/ibm/spectrum_virtualize/README.md b/ansible_collections/ibm/spectrum_virtualize/README.md
index 8755df085..d1f58b1b7 100644
--- a/ansible_collections/ibm/spectrum_virtualize/README.md
+++ b/ansible_collections/ibm/spectrum_virtualize/README.md
@@ -1,152 +1,6 @@
# Ansible Collection - ibm.spectrum_virtualize
-[![Code of conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html )
+Due to the rebranding of IBM Spectrum Virtualize to IBM Storage Virtualize, this collection has been discontinued and archived.
-This collection provides a series of Ansible modules and plugins for interacting with the IBM Spectrum Virtualize family storage products. These products include the IBM SAN Volume Controller, IBM FlashSystem family members built with IBM Spectrum Virtualize (FlashSystem 5xxx, 7xxx, 9xxx), IBM Storwize family, and IBM Spectrum Virtualize for Public Cloud. For more information regarding these products, see [IBM Documentation](https://www.ibm.com/docs/).
-
-## Requirements
-
-- Ansible version 2.9 or higher
-
-## Installation
-
-To install the IBM Spectrum Virtualize collection hosted in Galaxy:
-
-```bash
-ansible-galaxy collection install ibm.spectrum_virtualize
-```
-
-To upgrade to the latest version of the IBM Spectrum Virtualize collection:
-
-```bash
-ansible-galaxy collection install ibm.spectrum_virtualize --force
-```
-
-## Usage
-
-### Playbooks
-
-To use a module from the IBM Spectrum Virtualize collection, please reference the full namespace, collection name, and module name that you want to use:
-
-```yaml
----
-- name: Using the IBM Spectrum Virtualize collection
- hosts: localhost
- tasks:
- - name: Gather info from storage
- ibm.spectrum_virtualize.ibm_svc_info:
- clustername: x.x.x.x
- domain:
- username: username
- password: password
- log_path: /tmp/playbook.debug
- gather_subset: all
-```
-
-Alternatively, you can add a full namepsace and collection name in the `collections` element:
-
-```yaml
----
-- name: Using the IBM Spectrum Virtualize collection
- collections:
- - ibm.spectrum_virtualize
- gather_facts: no
- connection: local
- hosts: localhost
- tasks:
- - name: Gather info from storage
- ibm_svc_info:
- clustername: x.x.x.x
- domain:
- username: username
- password: password
- log_path: /tmp/playbook.debug
- gather_subset: all
-```
-
-## Supported Resources
-
-### Modules
-
-- ibm_svc_auth - Generates an authentication token for a user on Spectrum Virtualize storage systems
-- ibm_svc_complete_initial_setup - Completes the initial setup configuration for LMC systems
-- ibm_svc_host - Manages hosts on Spectrum Virtualize storage systems
-- ibm_svc_hostcluster - Manages host cluster on Spectrum Virtualize storage systems
-- ibm_svc_info - Collects information on Spectrum Virtualize storage systems
-- ibm_svc_initial_setup - Manages initial setup configuration on Spectrum Virtualize storage systems
-- ibm_svc_manage_callhome - Manages configuration of Call Home feature on Spectrum Virtualize storage systems
-- ibm_svc_manage_consistgrp_flashcopy - Manages FlashCopy consistency groups on Spectrum Virtualize storage systems
-- ibm_svc_manage_cv - Manages the change volume in remote copy replication on Spectrum Virtualize storage systems
-- ibm_svc_manage_flashcopy - Manages FlashCopy mappings on Spectrum Virtualize storage systems
-- ibm_svc_manage_ip - Manages IP provisioning on Spectrum Virtualize storage systems
-- ibm_svc_manage_migration - Manages volume migration between clusters on Spectrum Virtualize storage systems
-- ibm_svc_manage_mirrored_volume - Manages mirrored volumes on Spectrum Virtualize storage systems
-- ibm_svc_manage_ownershipgroup - Manages ownership groups on Spectrum Virtualize storage systems
-- ibm_svc_manage_portset - Manages IP portset on Spectrum Virtualize storage systems
-- ibm_svc_manage_replication - Manages remote copy replication on Spectrum Virtualize storage systems
-- ibm_svc_manage_replicationgroup - Manages remote copy consistency groups on Spectrum Virtualize storage systems
-- ibm_svc_manage_safeguarded_policy - Manages safeguarded policy configuration on Spectrum Virtualize storage systems
-- ibm_svc_manage_sra - Manages the remote support assistance configuration on Spectrum Virtualize storage systems
-- ibm_svc_manage_user - Manages user on Spectrum Virtualize storage systems
-- ibm_svc_manage_usergroup - Manages user groups on Spectrum Virtualize storage systems
-- ibm_svc_manage_volume - Manages standard volumes on Spectrum Virtualize storage systems
-- ibm_svc_manage_volumegroup - Manages volume groups on Spectrum Virtualize storage systems
-- ibm_svc_mdisk - Manages MDisks for Spectrum Virtualize storage systems
-- ibm_svc_mdiskgrp - Manages pools for Spectrum Virtualize storage systems
-- ibm_svc_start_stop_flashcopy - Starts or stops FlashCopy mapping and consistency groups on Spectrum Virtualize storage systems
-- ibm_svc_start_stop_replication - Starts or stops remote-copy independent relationships or consistency groups on Spectrum Virtualize storage systems
-- ibm_svc_vol_map - Manages volume mapping for Spectrum Virtualize storage systems
-- ibm_svcinfo_command - Runs svcinfo CLI command on Spectrum Virtualize storage systems over SSH session
-- ibm_svctask_command - Runs svctask CLI command(s) on Spectrum Virtualize storage systems over SSH session
-- ibm_sv_manage_awss3_cloudaccount - Manages Amazon S3 cloud account configuration on Spectrum Virtualize storage systems
-- ibm_sv_manage_cloud_backup - Manages cloud backups on Spectrum Virtualize storage systems
-- ibm_sv_manage_fc_partnership - Manages Fibre Channel (FC) partnership on Spectrum Virtualize storage systems
-- ibm_sv_manage_fcportsetmember - Manages addition or removal of ports from the Fibre Channel (FC) portsets on Spectrum Virtualize storage systems
-- ibm_sv_manage_ip_partnership - Manages IP partnership configuration on Spectrum Virtualize storage systems
-- ibm_sv_manage_provisioning_policy - Manages provisioning policy configuration on Spectrum Virtualize storage systems
-- ibm_sv_manage_replication_policy - Manages policy-based replication configuration on Spectrum Virtualize storage systems
-- ibm_sv_manage_snapshot - Manages snapshots (mutual consistent images of a volume) on Spectrum Virtualize storage systems
-- ibm_sv_manage_snapshotpolicy - Manages snapshot policy configuration on Spectrum Virtualize storage systems
-- ibm_sv_manage_ssl_certificate - Exports an existing system certificate on to Spectrum Virtualize storage systems
-- ibm_sv_manage_truststore_for_replication - Manages certificate trust stores for replication on Spectrum Virtualize family storage systems
-- ibm_sv_restore_cloud_backup - Restores cloud backups on Spectrum Virtualize storage systems
-- ibm_sv_switch_replication_direction - Switches the replication direction on Spectrum Virtualize storage systems
-
-### Other Feature Information
-- SV Ansible Collection v1.8.0 provides the new 'ibm_svc_complete_initial_setup' module, to complete the automation of Day 0 configuration on Licensed Machine Code (LMC) systems.
- For non-LMC systems, login to the user-interface is required in order to complete the automation of Day 0 configuration.
-- SV Ansible Collection v1.7.0 provided `Setup and Configuration Automation` through different modules. This feature helps user to automate Day 0 configuration.
- This feature includes three modules:
- - ibm_svc_initial_setup
- - ibm_svc_manage_callhome
- - ibm_svc_manage_sra
-- By proceeding and using these modules, the user acknowledges that [IBM Privacy Statement](https://www.ibm.com/privacy) has been read and understood.
-
-### Prerequisite
-
-- Paramiko must be installed to use ibm_svctask_command and ibm_svcinfo_command modules.
-
-## Limitation
-
-The modules in the IBM Spectrum Virtualize Ansible collection leverage REST APIs to connect to the IBM Spectrum Virtualize storage system. This has following limitations:
-1. Using the REST APIs to list more than 2000 objects may create a loss of service from the API side, as it automatically restarts due to memory constraints.
-2. It is not possible to access REST APIs using an IPv6 address on a cluster.
-3. The Ansible collection can run on all IBM Spectrum Virtualize storage system versions above 8.1.3, except versions 8.3.1.3, 8.3.1.4 and 8.3.1.5.
-4. At time of release of the SV Ansible v1.8.0 collection, no module is available for non LMC systems to automate license agreements acceptance, including EULA.
- User will be presented with a GUI setup wizard upon user-interface login, whether the Ansible modules have been used for initial configuration or not.
-
-## Releasing, Versioning, and Deprecation
-
-1. IBM Spectrum Virtualize Ansible Collection releases follow a quarterly release cycle.
-2. IBM Spectrum Virtualize Ansible Collection releases follow [semantic versioning](https://semver.org/).
-3. IBM Spectrum Virtualize Ansible modules deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
-
-## Contributing
-
-Currently we are not accepting community contributions.
-Though, you may periodically review this content to learn when and how contributions can be made in the future.
-IBM Spectrum Virtualize Ansible Collection maintainers can follow the [Maintainer guidelines](https://docs.ansible.com/ansible/devel/community/maintainers.html).
-
-## License
-
-GNU General Public License v3.0
+It is being replaced by a new repository. The new collection is fully compatible with the old one; all new features and development will take place in the new collection:
+[ansible-collections/ibm.storage_virtualize](https://github.com/ansible-collections/ibm.storage_virtualize)
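+
+For new installations, the replacement collection can be installed from Ansible Galaxy in the usual way (a minimal sketch; see the new repository for authoritative instructions):
+
+```bash
+ansible-galaxy collection install ibm.storage_virtualize
+```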
diff --git a/ansible_collections/ibm/spectrum_virtualize/changelogs/changelog.yaml b/ansible_collections/ibm/spectrum_virtualize/changelogs/changelog.yaml
index d7344d539..83cdcae33 100644
--- a/ansible_collections/ibm/spectrum_virtualize/changelogs/changelog.yaml
+++ b/ansible_collections/ibm/spectrum_virtualize/changelogs/changelog.yaml
@@ -136,3 +136,9 @@ releases:
- description: Manages addition or removal of ports from the Fibre Channel (FC) portsets on Spectrum Virtualize storage systems
name: ibm_sv_manage_fcportsetmember
namespace: ''
+ 2.0.0:
+ release_date: '2023-06-30'
+ changes:
+      release_summary: The collection is now archived and replaced with a new repository due to the rebranding of Spectrum
+        Virtualize to Storage Virtualize.
+      minor_changes:
+        - The module ibm_svc_vdisk has been removed from the collection after being marked deprecated for the last four release cycles.
diff --git a/ansible_collections/ibm/spectrum_virtualize/meta/runtime.yml b/ansible_collections/ibm/spectrum_virtualize/meta/runtime.yml
index 385b4e4bf..576832bc7 100644
--- a/ansible_collections/ibm/spectrum_virtualize/meta/runtime.yml
+++ b/ansible_collections/ibm/spectrum_virtualize/meta/runtime.yml
@@ -1,8 +1,2 @@
---
requires_ansible: '>=2.9.0'
-plugin_routing:
- modules:
- ibm_svc_vdisk:
- deprecation:
- removal_version: 2.0.0
- warning_text: Use ibm_svc_manage_volume instead. \ No newline at end of file
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt b/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt
new file mode 100644
index 000000000..e6bc84319
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt
@@ -0,0 +1,28 @@
+Objective:
+This playbook creates an FC host, multiple volumes, and zones on a FlashSystem cluster, and maps all of the volumes to the host.
+
+Prerequisite:
+- The IBM Spectrum Virtualize and Brocade Ansible collections must be installed
+- For more information on the Brocade switch Ansible collection, refer to https://github.com/brocade/ansible/blob/master/README.rst
+
+This playbook maps multiple cluster volumes to an FC host.
+- It uses Spectrum Virtualize Ansible modules as well as Brocade Ansible modules to create the zone
+
+There are two files used for this use case:
+
+1. multiple_vol_creation_zone_map_vars
+   This file has all the variables required for the playbook:
+   - cluster_* : Parameters starting with cluster contain details of the cluster where the user wants to create volumes, hosts, etc.
+   - brocade_switch_* : Parameters starting with brocade_switch contain Brocade switch details
+   - application_host_*: Parameters starting with application_host contain details of the application host performing reads/writes of data
+   - volume_details : Parameters starting with volume contain details of the volumes that will be mapped to the host
+   - portset_* : Parameters starting with portset contain portset details required for creating the FC host
+
+2. multi_volume_create_host_mapping_zone_multipath
+   - This playbook fetches the list of SCSI host-attach WWPNs associated with the given fcioportid from the SpecV cluster
+   - Creates a zone with the given name and adds the fetched SpecV ports and the given host WWPNs
+   - Creates multiple volumes based on the volume details provided
+   - Maps the volumes to the host to form multiple paths (a sample invocation is shown below)
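+
+   A typical invocation (an illustrative sketch; it assumes the vars file sits next to the playbook, as referenced by its vars_files entry):
+
+       ansible-playbook multiple_vol_creation_zone_map.yml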
+
+Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml b/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml
new file mode 100644
index 000000000..597e668ee
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml
@@ -0,0 +1,204 @@
+- name: Using Spectrum Virtualize collection to create and map multiple volumes
+ hosts: localhost
+ vars_files:
+ - multiple_vol_creation_zone_map_vars
+ collections:
+ - ibm.spectrum_virtualize
+ - brocade.fos
+ vars:
+ brocade_credentials:
+ fos_ip_addr: "{{ brocade_switch_ip }}"
+ fos_user_name: "{{ brocade_switch_username }}"
+ fos_password: "{{ brocade_switch_password }}"
+ https: False
+ gather_facts: no
+ connection: local
+ tasks:
+
+  - name: Fetch authorization token for the cluster
+ register: specv_token
+ ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+  - name: Get details of the targetportfc
+ register: fcdetails
+ ibm.spectrum_virtualize.ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ gather_subset: [targetportfc]
+ log_path: /tmp/fcdetails.debug
+
+  - name: Get the WWPN list from lstargetportfc for the given fc_io_port_id
+ set_fact:
+ specv_wwpn: "{{ specv_wwpn|default([]) + [item['WWPN']]}}"
+ when: (item.protocol == 'scsi' and item.host_io_permitted == 'yes' and item.fc_io_port_id in cluster_fcioportid)
+ loop: "{{ fcdetails.TargetPortFC }}"
+
+  - name: Convert the SpecV WWPNs to the colon-separated format the switch expects
+ set_fact:
+ specv_wwpn_switch_format: "{{ specv_wwpn_switch_format|default([]) +[item|map('join')|join(':')] }}"
+ loop: "{{ (specv_wwpn)|map('batch', 2)|map('list')|list|lower }}"
+
+  - name: Get all zoning information from the switch
+ brocade_facts:
+ credential: "{{brocade_credentials}}"
+ vfid: -1
+ gather_subset:
+ - brocade_zoning
+
+  - name: Copy the active config into the variable active_switch_config
+ set_fact:
+ active_switch_config: "{{ ansible_facts.brocade_zoning['effective-configuration'].cfg_name }}"
+
+ - name: Create zones on Brocade switch
+ vars:
+ zone:
+ - name: "{{ application_host_zone_name }}"
+ members: "{{ application_host_wwpns + specv_wwpn_switch_format }}"
+ brocade.fos.brocade_zoning_zone:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ zones: "{{ zone }}"
+ members_add_only: True
+
+ - name: Add zone to active configuration
+ vars:
+ cfgs:
+ - name: "{{ active_switch_config }}"
+ members:
+ - "{{ application_host_zone_name }}"
+ brocade_zoning_cfg:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ members_add_only: True
+ cfgs: "{{ cfgs }}"
+ active_cfg: "{{ active_switch_config }}"
+
+  - name: Create the host WWPN list for SpecV without colons
+ set_fact:
+ application_host_wwpns_specvformat_list: "{{ application_host_wwpns_specvformat_list | default([]) + [(item | replace(':',''))|upper]}}"
+ loop: "{{application_host_wwpns }}"
+
+  - name: Join the host WWPNs into a single colon-separated string
+ set_fact:
+ application_host_wwpns_specvformat: "{{application_host_wwpns_specvformat |default('')+item +':'}}"
+ loop: "{{application_host_wwpns_specvformat_list| select() }}"
+
+  - name: Strip the trailing colon from the joined WWPN string
+    set_fact:
+ application_host_wwpns_specvformat: "{{ application_host_wwpns_specvformat[:-1]}}"
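+  # Illustrative result of the three steps above, using the sample WWPNs from the
+  # vars file: ["10:00:00:90:fa:94:20:d0", "10:00:00:90:fa:94:20:d2"] becomes
+  # "10000090FA9420D0:10000090FA9420D2", the colon-separated form passed to ibm_svc_host below.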
+
+  - name: Create host on SpecV
+ ibm_svc_host:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ host_name }}"
+ state: present
+ fcwwpn: "{{ application_host_wwpns_specvformat }}"
+
+  - name: Create an FC portset
+ ibm.spectrum_virtualize.ibm_svc_manage_portset:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ portset_name }}"
+ porttype: fc
+ portset_type: host
+ state: present
+
+ - name: Add port ID to the portset
+ ibm.spectrum_virtualize.ibm_sv_manage_fcportsetmember:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ portset_name }}"
+ fcportid: "{{item}}"
+ state: present
+ loop: "{{ cluster_fcioportid }}"
+
+ - name: Create vdisk
+ register: results_cvdisk
+ ibm_svc_vdisk:
+ clustername: "{{cluster_ip}}"
+ token: "{{ specv_token.token }}"
+ domain:
+ state: present
+ name: "{{item.vol_name}}"
+ mdiskgrp: "{{item.mdiskgrp}}"
+ easytier: 'off'
+ size: "{{item.size}}"
+ unit: "{{item.unit}}"
+ loop: "{{ volume_details }}"
+
+  - name: Map the vdisks to the host
+ ibm_svc_vol_map:
+ clustername: "{{cluster_ip}}"
+ token: "{{ specv_token.token }}"
+ domain:
+ state: present
+ volname: "{{item.vol_name}}"
+ host: "{{host_name}}"
+ loop: "{{ volume_details }}"
+
+ - name: Rescan the paths on the host and run multipath
+ shell: "ssh {{application_host_username}}@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan;sleep 40;"
+
+ - shell: "ssh {{application_host_username}}@{{application_host_ip}} multipath -ll"
+ register: ps
+
+  - name: Split the multipath output into per-device chunks
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - debug:
+ msg: "{{ multipath_var}}"
+
+  - name: Get details of all volumes on the cluster
+ register: volinfo
+ ibm.spectrum_virtualize.ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ gather_subset: [vol]
+ log_path: /tmp/volinfo.debug
+
+  - name: Create the list of volume names
+ set_fact:
+ vol_name_list: "{{ vol_name_list|default([])+ [item['vol_name']] }}"
+ loop: "{{ volume_details }}"
+
+ - debug:
+ msg: "{{ vol_name_list }}"
+
+  - name: Collect volume data and name/UID pairs for the created volumes
+ set_fact:
+ vol_list_full_data: "{{ vol_list_full_data|default([])+ [item] }}"
+ vol_name_uid: "{{ vol_name_uid|default([])+[[item['volume_name'],item['vdisk_UID']|lower]]}}"
+ when: (item.volume_name in vol_name_list )
+ loop: "{{ volinfo.Volume }}"
+
+ - debug:
+ msg: "{{ vol_name_uid }}"
+
+  - name: Find which vdisk UIDs have a multipath device on the host
+ set_fact:
+ dm_device: "{{dm_device| default([]) +[ [item.0] + [item.1] + [item.2]]}}"
+ when: (item.1 in item.2)
+ with_nested:
+ - "{{ vol_name_uid }}"
+ - "{{ multipath_var }}"
+
+  - name: Build the list of volumes that are visible in multipath
+ set_fact:
+ vdisk_mapped_multipath: "{{vdisk_mapped_multipath| default([]) + [item[0]]}}"
+ loop: "{{ dm_device }}"
+
+ - debug:
+ msg: "{{ vdisk_mapped_multipath }}"
+
+  - name: Find the volumes that are not visible in multipath
+ set_fact:
+ unmaped_vol_name_list: "{{ unmaped_vol_name_list|default([])+ [item] }}"
+ when: (item not in vdisk_mapped_multipath)
+ loop: "{{ vol_name_list }}"
+
+ - debug:
+ msg: "{{ unmaped_vol_name_list }}"
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt b/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt
new file mode 100644
index 000000000..8a4fcdb18
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt
@@ -0,0 +1,30 @@
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username
+application_host_password: password
+application_host_zone_name: test
+application_host_wwpns: ["10:00:00:90:fa:94:20:d0","10:00:00:90:fa:94:20:d2"]
+
+cluster_ip: x.x.x.x
+cluster_username: username1
+cluster_password: password1
+cluster_fcioportid: ['1']
+
+host_name: linux_ansible
+portset_name: portset_ansible
+portset_type: host
+port_type: fc
+brocade_switch_ip: z.z.z.z
+brocade_switch_username: username2
+brocade_switch_password: password2
+
+volume_details:
+ - vol_name: vdisk_3
+ mdiskgrp: "0"
+ size: "5"
+ unit: "gb"
+ - vol_name: vdisk_4
+ mdiskgrp: "0"
+ size: "5"
+ unit: "gb"
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt
new file mode 100644
index 000000000..8b2970259
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt
@@ -0,0 +1,45 @@
+Objective:
+Migrate a volume from one FlashSystem to another in an application-transparent manner, with the target host attached through iSCSI.
+
+Prerequisite:
+- The IBM Spectrum Virtualize Ansible collection must be installed
+
+These playbooks migrate a volume from a source cluster to the destination cluster.
+They are designed to migrate a volume mapped to a Fibre Channel (FC) or iSCSI host on the source cluster to an iSCSI host on the destination cluster.
+
+There are three files used for this use case:
+ 1. vol_migration_vars:
+    This file has all the variables required for the playbooks:
+    - src_cluster_* : Parameters starting with src_cluster contain details of the source cluster from which the user wants to migrate the volume
+    - dest_cluster* : Parameters starting with dest_cluster contain details of the destination cluster to which the volume will be migrated
+    - application_host_* : Parameters starting with application_host contain details of the application host performing reads/writes of data
+    - application_iscsi_ip : Contains the IP details to be assigned to each node, with the following fields
+ - node_name: Node name of cluster
+ - portset: portset name to be used
+ - ip_address: <ip address>
+ - subnet_prefix: <prefix>
+ - gateway: <gateway>
+ - port: <port_id>
+    - src_vol_name : The name of the volume on the source cluster that is to be migrated
+    - dest_vol_name : The name of the volume to be created on the destination cluster
+    - rel_name : The name of the relationship to be created between the source and destination clusters
+ 2. initiate_migration_for_given_volume:
+ - This playbook initiates the migration
+    - Most importantly, it also starts the data copy from the source cluster to the destination cluster
+    Note:
+    Do not run the playbook create_host_map_volume_and_rescan until the relationship is in the consistent_synchronized state
+ 3. create_host_map_volume_and_rescan
+    - Execute this playbook once the relationship created by the above playbook is in the consistent_synchronized state
+    - Creates an iSCSI host on the FlashSystem from the IQN defined in the variable application_host_iqn in the variable file
+    - Configures an IP on each node for iSCSI host connectivity
+    - Establishes iSCSI sessions from the host to the FlashSystem nodes
+    - Maps the volume to the host and starts a SCSI rescan on the host
+    - Switches the replication direction of the migration relationship once the host is mapped
+    - Rescans the volume on the host again to get the updated path details
+    - Deletes the source volume and the migration relationship that was created
+    - Rescans multipath again and expects the migrated volume to have only paths from the destination cluster (a sample run order is shown below)
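+
+    A typical run order (an illustrative sketch):
+        ansible-playbook initiate_migration_for_given_volume.yml
+        (wait until the relationship reaches the consistent_synchronized state)
+        ansible-playbook create_iscsi_host_map_vol_switch.yml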
+
+ Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
+ Devendra Mahajan (demahaj1@in.ibm.com)
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml
new file mode 100644
index 000000000..b7a0f1bb3
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml
@@ -0,0 +1,143 @@
+- name: Using Spectrum Virtualize collection to migrate given volume
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars
+ collections:
+ - ibm.spectrum_virtualize
+
+ gather_facts: no
+ vars:
+ dest_vol_name: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ dest_host_name: "{{ host_name }}"
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+
+  - name: Get details of the given volume
+ register: volinfo
+ ibm.spectrum_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [vol]
+ objectname: "{{ dest_vol_name }}"
+ log_path: /tmp/volinfo.debug
+
+ - name: Get the volume UID data
+ set_fact:
+ vol_uid: "{{ volinfo.Volume[0]['vdisk_UID'] | lower }}"
+ when: volinfo.Volume[0] is defined
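+  # vdisk_UID is lowercased because 'multipath -ll' prints WWIDs in lower case,
+  # while lsvdisk reports the UID in upper case.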
+
+  - name: Create an iSCSI host on SVC
+ ibm_svc_host:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ name: "{{ dest_host_name }}"
+ state: present
+ iscsiname: "{{ application_host_iqn }}"
+
+  - name: Map the vdisk to the host
+ ibm_svc_vol_map:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ state: present
+ volname: "{{ dest_vol_name }}"
+ host: "{{ dest_host_name }}"
+ scsi: 0
+
+ - name: Create IP provisioning
+ ibm.spectrum_virtualize.ibm_svc_manage_ip:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ log_path: /tmp/playbook.debug
+ node: "{{ item.node_name }}"
+ port: "{{ item.port }}"
+ portset: "{{ item.portset }}"
+ ip_address: "{{ item.ip_address }}"
+ subnet_prefix: "{{ item.subnet_prefix }}"
+ gateway: "{{ item.gateway }}"
+ state: present
+ loop: "{{ application_iscsi_ip }}"
+
+  - name: Create iSCSI sessions from the host to each portal IP
+ shell: ssh {{ application_host_username }}@{{ application_host_ip }} "iscsiadm --mode discovery --type sendtargets --portal {{item.ip_address}} -l"
+ loop: "{{ application_iscsi_ip }}"
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+  - name: Split the multipath output into per-device chunks
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - debug:
+ msg: "{{ multipath_var}}"
+
+  - name: Find the vdisk UID among the multipath devices on the host
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device}}"
+
+ - name: Switch replication direction of a migration relationship
+ ibm_svc_manage_migration:
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ token: "{{ src_token.token }}"
+ state: switch
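+  # 'state: switch' reverses the direction of the migration relationship so that
+  # the destination copy becomes the primary serving host I/O.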
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+  - name: Split the multipath output into per-device chunks
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+  - name: Find the vdisk UID among the multipath devices on the host
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device }}"
+
+ - name: Delete source volume and migration relationship
+ ibm_svc_manage_migration:
+ clustername: "{{ src_cluster_ip }}"
+ state: cleanup
+ source_volume: "{{ src_vol_name }}"
+ token: "{{ src_token.token }}"
+ log_path: /tmp/ansible.log
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+  - name: Split the multipath output into per-device chunks
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+  - name: Find the vdisk UID among the multipath devices on the host
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device}}"
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml
new file mode 100644
index 000000000..b01e6122b
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml
@@ -0,0 +1,33 @@
+- name: Using Spectrum Virtualize collection to initiate migration
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars
+ collections:
+ - ibm.spectrum_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+ - name: Initiate a volume migration with replicate_hosts as false
+ ibm_svc_manage_migration:
+ source_volume: "{{ src_vol_name }}"
+ target_volume: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ remote_cluster: "{{ dest_cluster_name }}"
+ token: "{{ src_token.token }}"
+ state: initiate
+ replicate_hosts: false
+ remote_token: "{{ dest_token.token }}"
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ remote_pool: "{{ dest_cluster_pool_name }}"
diff --git a/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt
new file mode 100644
index 000000000..14905b86b
--- /dev/null
+++ b/ansible_collections/ibm/spectrum_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt
@@ -0,0 +1,36 @@
+src_cluster_name: Master
+src_cluster_ip: x.x.x.x
+src_cluster_username: username
+src_cluster_password: password
+
+dest_cluster_name: Aux_far
+dest_cluster_ip: y.y.y.y
+dest_cluster_username: username1
+dest_cluster_password: password1
+dest_cluster_pool_name: mdiskgrp0
+
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username2
+application_host_password: password2
+application_host_iqn: "iqn.1994-05.com.redhat:5e54d1815f55"
+
+application_iscsi_ip:
+ - node_name: node1
+ portset: portset0
+ ip_address: 192.168.100.121
+ subnet_prefix: 24
+ gateway: 192.168.100.1
+ port: 6
+ - node_name: node2
+ portset: portset0
+ ip_address: 192.168.100.122
+ subnet_prefix: 24
+ gateway: 192.168.100.1
+ port: 6
+
+src_vol_name: vdisk_application1
+host_name: linux_host
+dest_vol_name: vdisk_application1
+rel_name: r1
diff --git a/ansible_collections/ibm/spectrum_virtualize/plugins/modules/ibm_svc_vdisk.py b/ansible_collections/ibm/spectrum_virtualize/plugins/modules/ibm_svc_vdisk.py
deleted file mode 100644
index 45c3d8a38..000000000
--- a/ansible_collections/ibm/spectrum_virtualize/plugins/modules/ibm_svc_vdisk.py
+++ /dev/null
@@ -1,421 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2020 IBM CORPORATION
-# Author(s): Peng Wang <wangpww@cn.ibm.com>
-# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
-# Rohit kumar <rohit.kumar6@ibm.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: ibm_svc_vdisk
-short_description: This module manages volumes on IBM Spectrum Virtualize
- Family storage systems
-description:
- - Ansible interface to manage 'mkvdisk' and 'rmvdisk' volume commands.
-version_added: "1.0.0"
-options:
- name:
- description:
- - Specifies the name to assign to the new volume.
- required: true
- type: str
- state:
- description:
- - Creates (C(present)) or removes (C(absent)) a volume.
- choices: [ absent, present ]
- required: true
- type: str
- clustername:
- description:
- - The hostname or management IP of the Spectrum Virtualize storage system.
- type: str
- required: true
- domain:
- description:
- - Domain for the Spectrum Virtualize storage system.
- - Valid when hostname is used for the parameter I(clustername).
- type: str
- username:
- description:
- - REST API username for the Spectrum Virtualize storage system.
- - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
- type: str
- password:
- description:
- - REST API password for the Spectrum Virtualize storage system.
- - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
- type: str
- token:
- description:
- - The authentication token to verify a user on the Spectrum Virtualize storage system.
- - To generate a token, use ibm_svc_auth module.
- type: str
- version_added: '1.5.0'
- mdiskgrp:
- description:
- - Specifies the name of the storage pool to use when
- creating this volume. This parameter is required when I(state=present).
- type: str
- easytier:
- description:
- - Defines use of easytier with VDisk.
- - Applies when I(state=present).
- type: str
- choices: [ 'on', 'off' ]
- size:
- description:
- - Defines the size of VDisk. This parameter is required when I(state=present).
- - This parameter can also be used to resize an existing VDisk.
- type: str
- unit:
- description:
- - Defines the size option for the storage unit. This parameter is required when I(state=present).
- type: str
- choices: [ b, kb, mb, gb, tb, pb ]
- default: mb
- validate_certs:
- description:
- - Validates certification.
- default: false
- type: bool
- log_path:
- description:
- - Path of debug log file.
- type: str
- rsize:
- description:
- - Defines how much physical space is initially allocated to the thin-provisioned volume in %.
- If rsize is not passed, the volume created is a standard volume.
- - Applies when C(state=present).
- type: str
- version_added: '1.2.0'
- autoexpand:
- description:
- - Specifies that thin-provisioned volume copies can automatically expand their real capacities.
- type: bool
- version_added: '1.2.0'
-author:
- - Sreshtant Bohidar(@Sreshtant-Bohidar)
- - Rohit Kumar(@rohitk-github)
-notes:
- - This module supports C(check_mode).
-deprecated:
- removed_in: 2.0.0
- why: New module released
- alternative: Use M(ibm.spectrum_virtualize.ibm_svc_manage_volume) instead.
-'''
-
-EXAMPLES = '''
-- name: Create a volume
- ibm.spectrum_virtualize.ibm_svc_vdisk:
- clustername: "{{clustername}}"
- domain: "{{domain}}"
- username: "{{username}}"
- password: "{{password}}"
- log_path: /tmp/playbook.debug
- name: volume0
- state: present
- mdiskgrp: Pool0
- easytier: 'off'
- size: "4294967296"
- unit: b
-- name: Create a thin-provisioned volume
- ibm.spectrum_virtualize.ibm_svc_vdisk:
- clustername: "{{clustername}}"
- domain: "{{domain}}"
- username: "{{username}}"
- password: "{{password}}"
- log_path: /tmp/playbook.debug
- name: volume0
- state: present
- mdiskgrp: Pool0
- easytier: 'off'
- size: "4294967296"
- unit: b
- rsize: '20%'
- autoexpand: true
-- name: Delete a volume
- ibm.spectrum_virtualize.ibm_svc_vdisk:
- clustername: "{{clustername}}"
- domain: "{{domain}}"
- username: "{{username}}"
- password: "{{password}}"
- log_path: /tmp/playbook.debug
- name: volume0
- state: absent
-'''
-
-RETURN = '''#'''
-
-from traceback import format_exc
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
-from ansible.module_utils._text import to_native
-
-
-class IBMSVCvdisk(object):
- def __init__(self):
- argument_spec = svc_argument_spec()
-
- argument_spec.update(
- dict(
- name=dict(type='str', required=True),
- state=dict(type='str', required=True, choices=['absent',
- 'present']),
- mdiskgrp=dict(type='str', required=False),
- size=dict(type='str', required=False),
- unit=dict(type='str', default='mb', choices=['b', 'kb',
- 'mb', 'gb',
- 'tb', 'pb']),
- easytier=dict(type='str', choices=['on', 'off']),
- rsize=dict(type='str', required=False),
- autoexpand=dict(type='bool', required=False)
- )
- )
-
- self.module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- self.resizevdisk_flag = False
- self.expand_flag = False
- self.shrink_flag = False
-
- # logging setup
- log_path = self.module.params['log_path']
- log = get_logger(self.__class__.__name__, log_path)
- self.log = log.info
-
- # Required
- self.name = self.module.params['name']
- self.state = self.module.params['state']
-
- # Optional
- self.mdiskgrp = self.module.params['mdiskgrp']
- self.size = self.module.params['size']
- self.unit = self.module.params['unit']
- self.easytier = self.module.params.get('easytier', None)
- self.rsize = self.module.params['rsize']
- self.autoexpand = self.module.params['autoexpand']
-
- # Handling missing mandatory parameter name
- if not self.name:
- self.module.fail_json('Missing mandatory parameter: name')
-
- self.restapi = IBMSVCRestApi(
- module=self.module,
- clustername=self.module.params['clustername'],
- domain=self.module.params['domain'],
- username=self.module.params['username'],
- password=self.module.params['password'],
- validate_certs=self.module.params['validate_certs'],
- log_path=log_path,
- token=self.module.params['token']
- )
-
- def convert_to_bytes(self):
- return int(self.size) * (1024 ** (['b', 'kb', 'mb', 'gb', 'tb', 'pb'].index((self.unit).lower())))
-
- def get_existing_vdisk(self):
- self.log("Entering function get_existing_vdisk")
- cmd = 'lsvdisk'
- cmdargs = {}
- cmdopts = {'bytes': True}
- cmdargs = [self.name]
- existing_vdisk_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
- return existing_vdisk_data
-
- # TBD: Implement a more generic way to check for properties to modify.
- def vdisk_probe(self, data):
- props = []
- # Check if change in vdisk size is required
- input_size = int(self.convert_to_bytes())
- actual_size = int(data[0]['capacity'])
- if self.size:
- if input_size != actual_size:
- props += ['resize']
- if input_size > actual_size:
- self.expand_flag = True
- self.change_in_size = input_size - actual_size
- else:
- self.shrink_flag = True
- self.change_in_size = actual_size - input_size
- # TBD: The parameter is easytier but the view has easy_tier label.
- if self.easytier:
- if self.easytier != data[1]['easy_tier']:
- props += ['easytier']
- self.log("vdisk_probe props='%s'", props)
- return props
-
- def detect_vdisk_type(self, data):
- isMirrored = False
- if data[0]['type'] == "many":
- isMirrored = True
- if not isMirrored:
- relationship_name = data[0]['RC_name']
- if relationship_name:
- rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
- if rel_data['copy_type'] == "activeactive":
- isMirrored = True
- if isMirrored:
- self.module.fail_json(msg="Mirror volumes cannot be managed using this module.\
- To manage mirror volumes, module 'ibm_svc_manange_mirrored_volume' can be used")
-
- def resizevdisk(self):
- cmdopts = {}
- if self.expand_flag:
- cmd = "expandvdisksize"
- elif self.shrink_flag:
- cmd = "shrinkvdisksize"
- cmdopts["size"] = str(self.change_in_size)
- cmdopts["unit"] = "b"
- cmdargs = [self.name]
-
- self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
- self.changed = True
-
- def vdisk_create(self):
- if not self.mdiskgrp:
- self.module.fail_json(msg="You must pass in "
- "mdiskgrp to the module.")
- if not self.size:
- self.module.fail_json(msg="You must pass in size to the module.")
- if not self.unit:
- self.module.fail_json(msg="You must pass in unit to the module.")
-
- if self.module.check_mode:
- self.changed = True
- return
-
- self.log("creating vdisk '%s'", self.name)
-
- # Make command
- cmd = 'mkvdisk'
- cmdopts = {}
- if self.mdiskgrp:
- cmdopts['mdiskgrp'] = self.mdiskgrp
- if self.size:
- cmdopts['size'] = self.size
- if self.unit:
- cmdopts['unit'] = self.unit
- if self.easytier:
- cmdopts['easytier'] = self.easytier
- if self.rsize:
- cmdopts['rsize'] = self.rsize
- if self.autoexpand:
- cmdopts['autoexpand'] = self.autoexpand
- cmdopts['name'] = self.name
- self.log("creating vdisk command %s opts %s", cmd, cmdopts)
-
- # Run command
- result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
- self.log("create vdisk result %s", result)
-
- if 'message' in result:
- self.changed = True
- self.log("create vdisk result message %s", result['message'])
- else:
- self.module.fail_json(
- msg="Failed to create vdisk [%s]" % self.name)
-
- def vdisk_update(self, modify):
- self.log("updating vdisk '%s'", self.name)
- if 'resize' in modify and 'easytier' in modify:
- self.module.fail_json(msg="You cannot resize a volume while modifying other attributes")
- if self.module.check_mode:
- self.changed = True
- return
- if 'resize' in modify:
- self.resizevdisk()
- self.changed = True
- elif 'easytier' in modify:
- cmd = 'chvdisk'
- cmdopts = {}
- cmdopts['easytier'] = self.easytier
- cmdargs = [self.name]
-
- self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
- # Any error will have been raised in svc_run_command
- # chvdisk does not output anything when successful.
- self.changed = True
-
- def vdisk_delete(self):
- if self.module.check_mode:
- self.changed = True
- return
-
- self.log("deleting vdisk '%s'", self.name)
-
- cmd = 'rmvdisk'
- cmdopts = None
- cmdargs = [self.name]
-
- self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
-
- # Any error will have been raised in svc_run_command
- # chmvdisk does not output anything when successful.
- self.changed = True
-
- def apply(self):
- changed = False
- msg = None
- modify = []
-
- vdisk_data = self.get_existing_vdisk()
- if vdisk_data:
- self.detect_vdisk_type(vdisk_data)
- if self.state == 'absent':
- self.log("CHANGED: vdisk exists, but requested "
- "state is 'absent'")
- changed = True
- elif self.state == 'present':
- # This is where we detect if chvdisk or resize should be called
- modify = self.vdisk_probe(vdisk_data)
- if modify:
- changed = True
- else:
- if self.state == 'present':
- self.log("CHANGED: vdisk does not exist, "
- "but requested state is 'present'")
- changed = True
-
- if changed:
- if self.state == 'present':
- if not vdisk_data:
- self.vdisk_create()
- msg = "vdisk [%s] has been created." % self.name
- else:
- # This is where we would modify
- self.vdisk_update(modify)
- msg = "vdisk [%s] has been modified." % self.name
- elif self.state == 'absent':
- self.vdisk_delete()
- msg = "vdisk [%s] has been deleted." % self.name
-
- if self.module.check_mode:
- msg = 'skipping changes due to check mode'
- else:
- self.log("exiting with no changes")
- if self.state == 'absent':
- msg = "vdisk [%s] did not exist." % self.name
- else:
- msg = "vdisk [%s] already exists." % self.name
-
- self.module.exit_json(msg=msg, changed=changed)
-
-
-def main():
- v = IBMSVCvdisk()
- try:
- v.apply()
- except Exception as e:
- v.log("Exception in apply(): \n%s", format_exc())
- v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/ibm/spectrum_virtualize/tests/unit/plugins/modules/test_ibm_svc_vdisk.py b/ansible_collections/ibm/spectrum_virtualize/tests/unit/plugins/modules/test_ibm_svc_vdisk.py
deleted file mode 100644
index a7d381dce..000000000
--- a/ansible_collections/ibm/spectrum_virtualize/tests/unit/plugins/modules/test_ibm_svc_vdisk.py
+++ /dev/null
@@ -1,473 +0,0 @@
-# Copyright (C) 2020 IBM CORPORATION
-# Author(s): Peng Wang <wangpww@cn.ibm.com>
-#
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-""" unit tests IBM Spectrum Virtualize Ansible module: ibm_svc_vdisk """
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-import unittest
-import pytest
-import json
-from mock import patch
-from ansible.module_utils import basic
-from ansible.module_utils._text import to_bytes
-from ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
-from ansible_collections.ibm.spectrum_virtualize.plugins.modules.ibm_svc_vdisk import IBMSVCvdisk
-
-
-def set_module_args(args):
- """prepare arguments so that they will be picked up during module
- creation """
- args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
- basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
-
-
-class AnsibleExitJson(Exception):
- """Exception class to be raised by module.exit_json and caught by the
- test case """
- pass
-
-
-class AnsibleFailJson(Exception):
- """Exception class to be raised by module.fail_json and caught by the
- test case """
- pass
-
-
-def exit_json(*args, **kwargs): # pylint: disable=unused-argument
- """function to patch over exit_json; package return data into an
- exception """
- if 'changed' not in kwargs:
- kwargs['changed'] = False
- raise AnsibleExitJson(kwargs)
-
-
-def fail_json(*args, **kwargs): # pylint: disable=unused-argument
- """function to patch over fail_json; package return data into an
- exception """
- kwargs['failed'] = True
- raise AnsibleFailJson(kwargs)
-
-
-class TestIBMSVCvdisk(unittest.TestCase):
- """ a group of related Unit Tests"""
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def setUp(self, connect):
- self.mock_module_helper = patch.multiple(basic.AnsibleModule,
- exit_json=exit_json,
- fail_json=fail_json)
- self.mock_module_helper.start()
- self.addCleanup(self.mock_module_helper.stop)
- self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
- 'domain.ibm.com', 'username', 'password',
- False, 'test.log', '')
-
- def set_default_args(self):
- return dict({
- 'name': 'test',
- 'state': 'present'
- })
-
- def test_module_fail_when_required_args_missing(self):
- """ required arguments are reported as errors """
- with pytest.raises(AnsibleFailJson) as exc:
- set_module_args({})
- IBMSVCvdisk()
- print('Info: %s' % exc.value.args[0]['msg'])
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_get_existing_volume(self, svc_authorize_mock, svc_obj_info_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_get_existing_volume',
- 'mdiskgrp': 'Ansible-Pool'
- })
- vol_ret = [{"id": "0", "name": "test_get_existing_volume",
- "IO_group_id": "0", "IO_group_name": "io_grp0",
- "status": "online", "mdisk_grp_id": "0",
- "mdisk_grp_name": "Pool_Ansible_collections",
- "capacity": "4.00GB", "type": "striped", "FC_id": "",
- "FC_name": "", "RC_id": "", "RC_name": "",
- "vdisk_UID": "6005076810CA0166C00000000000019F",
- "fc_map_count": "0", "copy_count": "1",
- "fast_write_state": "empty", "se_copy_count": "0",
- "RC_change": "no", "compressed_copy_count": "0",
- "parent_mdisk_grp_id": "0",
- "parent_mdisk_grp_name": "Pool_Ansible_collections",
- "owner_id": "", "owner_name": "", "formatting": "no",
- "encrypt": "no", "volume_id": "0",
- "volume_name": "volume_Ansible_collections",
- "function": "", "protocol": "scsi"}]
- svc_obj_info_mock.return_value = vol_ret
- vol = IBMSVCvdisk().get_existing_vdisk()
- self.assertEqual('test_get_existing_volume', vol[0]['name'])
- self.assertEqual('0', vol[0]['id'])
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_create')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_volume_create_get_existing_volume_called(
- self, svc_authorize_mock,
- get_existing_volume_mock,
- vdisk_create_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_volume',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- })
- vol_created = IBMSVCvdisk()
- get_existing_volume_mock.return_value = []
- vdisk_create_mock.return_value = {
- u'message': u'Storage volume, id [0] successfully created',
- u'id': u'0'
- }
- with pytest.raises(AnsibleExitJson) as exc:
- vol_created.apply()
- self.assertTrue(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_volume_failed_since_missed_required_param(
- self, svc_authorize_mock, get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_failed_since_missed_required_param',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- })
- get_existing_volume_mock.return_value = []
- vol_created = IBMSVCvdisk()
- with pytest.raises(AnsibleFailJson) as exc:
- vol_created.apply()
- self.assertTrue(exc.value.args[0]['failed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_probe')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_volume_but_volume_existed(self, svc_authorize_mock,
- volume_probe_mock,
- get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- })
- vol_ret = [{"id": "0", "name": "volume_Ansible_collections",
- "IO_group_id": "0", "IO_group_name": "io_grp0",
- "status": "online", "mdisk_grp_id": "0",
- "mdisk_grp_name": "Pool_Ansible_collections",
- "capacity": "4.00GB", "type": "striped", "FC_id": "",
- "FC_name": "", "RC_id": "", "RC_name": "",
- "vdisk_UID": "6005076810CA0166C00000000000019F",
- "fc_map_count": "0", "copy_count": "1",
- "fast_write_state": "empty", "se_copy_count": "0",
- "RC_change": "no", "compressed_copy_count": "0",
- "parent_mdisk_grp_id": "0",
- "parent_mdisk_grp_name": "Pool_Ansible_collections",
- "owner_id": "", "owner_name": "", "formatting": "no",
- "encrypt": "no", "volume_id": "0",
- "volume_name": "volume_Ansible_collections",
- "function": "", "protocol": "scsi"}]
- get_existing_volume_mock.return_value = vol_ret
- volume_probe_mock.return_value = []
- volume_created = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_created.apply()
- self.assertFalse(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_create')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_volume_successfully(self, svc_authorize_mock,
- volume_create_mock,
- get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- })
- volume = {u'message': u'Storage volume, id [0], '
- u'successfully created', u'id': u'0'}
- volume_create_mock.return_value = volume
- get_existing_volume_mock.return_value = []
- volume_created = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_created.apply()
- self.assertTrue(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_volume_failed_since_no_message_in_result(
- self, svc_authorize_mock, svc_run_command_mock,
- get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- })
- volume = {u'id': u'0'}
- svc_run_command_mock.return_value = volume
- get_existing_volume_mock.return_value = []
- volume_created = IBMSVCvdisk()
- with pytest.raises(AnsibleFailJson) as exc:
- volume_created.apply()
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_delete_volume_but_volume_not_existed(self, svc_authorize_mock,
- get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'absent',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'size': '4294967296',
- 'unit': 'b',
- })
- get_existing_volume_mock.return_value = []
- volume_deleted = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_deleted.apply()
- self.assertFalse(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_delete')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_delete_volume_successfully(self, svc_authorize_mock,
- volume_delete_mock,
- get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'absent',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_delete_volume_successfully',
- })
- vol_ret = [{"id": "0", "name": "volume_Ansible_collections",
- "IO_group_id": "0", "IO_group_name": "io_grp0",
- "status": "online", "mdisk_grp_id": "0",
- "mdisk_grp_name": "Pool_Ansible_collections",
- "capacity": "4.00GB", "type": "striped", "FC_id": "",
- "FC_name": "", "RC_id": "", "RC_name": "",
- "vdisk_UID": "6005076810CA0166C00000000000019F",
- "fc_map_count": "0", "copy_count": "1",
- "fast_write_state": "empty", "se_copy_count": "0",
- "RC_change": "no", "compressed_copy_count": "0",
- "parent_mdisk_grp_id": "0",
- "parent_mdisk_grp_name": "Pool_Ansible_collections",
- "owner_id": "", "owner_name": "", "formatting": "no",
- "encrypt": "no", "volume_id": "0",
- "volume_name": "volume_Ansible_collections",
- "function": "", "protocol": "scsi"}]
- get_existing_volume_mock.return_value = vol_ret
- volume_deleted = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_deleted.apply()
- self.assertTrue(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_probe')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_thin_volume_but_volume_existed(self, svc_authorize_mock, volume_probe_mock, get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- 'rsize': '20%',
- 'autoexpand': True
- })
- vol_ret = [{"id": "0", "name": "volume_Ansible_collections",
- "IO_group_id": "0", "IO_group_name": "io_grp0",
- "status": "online", "mdisk_grp_id": "0",
- "mdisk_grp_name": "Pool_Ansible_collections",
- "capacity": "4.00GB", "type": "striped", "FC_id": "",
- "FC_name": "", "RC_id": "", "RC_name": "",
- "vdisk_UID": "6005076810CA0166C00000000000019F",
- "fc_map_count": "0", "copy_count": "1",
- "fast_write_state": "empty", "se_copy_count": "0",
- "RC_change": "no", "compressed_copy_count": "0",
- "parent_mdisk_grp_id": "0",
- "parent_mdisk_grp_name": "Pool_Ansible_collections",
- "owner_id": "", "owner_name": "", "formatting": "no",
- "encrypt": "no", "volume_id": "0",
- "volume_name": "volume_Ansible_collections",
- "function": "", "protocol": "scsi"}]
- get_existing_volume_mock.return_value = vol_ret
- volume_probe_mock.return_value = []
- volume_created = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_created.apply()
- self.assertFalse(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_create')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_thin_volume_successfully(self, svc_authorize_mock, volume_create_mock, get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- 'rsize': '20%'
- })
- volume = {u'message': u'Storage volume, id [0], '
- u'successfully created', u'id': u'0'}
- volume_create_mock.return_value = volume
- get_existing_volume_mock.return_value = []
- volume_created = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_created.apply()
- self.assertTrue(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.get_existing_vdisk')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.modules.'
- 'ibm_svc_vdisk.IBMSVCvdisk.vdisk_create')
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_create_thin_volume_successfully_with_autoexpand(self, svc_authorize_mock, volume_create_mock, get_existing_volume_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '4294967296',
- 'unit': 'b',
- 'rsize': '20%',
- 'autoexpand': True
- })
- volume = {u'message': u'Storage volume, id [0], '
- u'successfully created', u'id': u'0'}
- volume_create_mock.return_value = volume
- get_existing_volume_mock.return_value = []
- volume_created = IBMSVCvdisk()
- with pytest.raises(AnsibleExitJson) as exc:
- volume_created.apply()
- self.assertTrue(exc.value.args[0]['changed'])
- get_existing_volume_mock.assert_called_with()
-
- @patch('ansible_collections.ibm.spectrum_virtualize.plugins.module_utils.'
- 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
- def test_convert_to_bytes(self, svc_authorize_mock):
- set_module_args({
- 'clustername': 'clustername',
- 'domain': 'domain',
- 'state': 'present',
- 'username': 'username',
- 'password': 'password',
- 'name': 'test_create_volume_but_volume_existed',
- 'mdiskgrp': 'Ansible-Pool',
- 'easytier': 'off',
- 'size': '2',
- 'unit': 'gb',
- 'rsize': '20%',
- 'autoexpand': True
- })
- v = IBMSVCvdisk()
- data = v.convert_to_bytes()
- self.assertEqual(2147483648, data)
-
-
-if __name__ == '__main__':
- unittest.main()
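
For reference, the arithmetic behind test_convert_to_bytes above: a size of
2 with unit 'gb' must come out as 2 * 1024^3 = 2147483648 bytes. The short
sketch below reproduces that conversion; the unit table is an assumption
inferred from the 'b' and 'gb' values used in these tests, not the module's
actual implementation.

# Hypothetical stand-in for IBMSVCvdisk.convert_to_bytes(); the real module
# reads `size` and `unit` from its parameters. 1024-based factors assumed.
UNIT_FACTORS = {'b': 1, 'kb': 1024, 'mb': 1024 ** 2,
                'gb': 1024 ** 3, 'tb': 1024 ** 4}

def convert_to_bytes(size, unit):
    """Return a volume size in bytes, e.g. ('2', 'gb') -> 2147483648."""
    return int(size) * UNIT_FACTORS[unit.lower()]

assert convert_to_bytes('2', 'gb') == 2147483648
assert convert_to_bytes('4294967296', 'b') == 4294967296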
diff --git a/ansible_collections/ibm/storage_virtualize/.github/workflows/ansible-test.yml b/ansible_collections/ibm/storage_virtualize/.github/workflows/ansible-test.yml
new file mode 100644
index 000000000..df3715a1b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/.github/workflows/ansible-test.yml
@@ -0,0 +1,249 @@
+# README FIRST
+# 1. If you don't have unit tests, remove the "units" job below.
+# 2. If your collection depends on other collections, ensure they are
+#    installed by adding them to the "test-deps" input.
+# 3. For the comprehensive list of the inputs supported by the
+# ansible-community/ansible-test-gh-action GitHub Action, see
+# https://github.com/marketplace/actions/ansible-test.
+# 4. If you want to prevent merging PRs that do not pass all tests,
+# make sure to add the "check" job to your repository branch
+# protection once this workflow is added.
+# It is also possible to tweak which jobs are allowed to fail. See
+# https://github.com/marketplace/actions/alls-green#gotchas for more detail.
+# 5. If you need help, please ask in #ansible-community on the Libera.chat IRC
+#    network.
+
+name: CI
+on:
+  # Run CI against all pushes (direct commits, including merged PRs) and pull requests
+ push:
+ branches:
+ - main
+ - develop
+ pull_request:
+ # Run CI once per day (at 06:00 UTC)
+  # This ensures that even if there haven't been commits, we are still
+  # testing against the latest version of ansible-test for each ansible-core
+  # version
+ schedule:
+ - cron: '0 6 * * *'
+
+concurrency:
+ group: >-
+ ${{ github.workflow }}-${{
+ github.event.pull_request.number || github.sha
+ }}
+ cancel-in-progress: true
+
+jobs:
+
+###
+# Sanity tests (REQUIRED)
+#
+# https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html
+
+ sanity:
+    name: Sanity (Ⓐ${{ matrix.ansible }})
+ strategy:
+ matrix:
+ ansible:
+ # It's important that Sanity is tested against all stable-X.Y branches
+ # Testing against `devel` may fail as new tests are added.
+          # An alternative to `devel` is the `milestone` branch, which
+          # gets synchronized with `devel` every few weeks and therefore
+          # tends to be a more stable target. Be aware that it is not updated
+          # around the creation of a new stable branch; this can lead to two
+          # different versions of ansible-test using the same sanity test
+          # ignore.txt file.
+          # The commented branches below are EOL; drop them unless your
+          # collection still needs to support them.
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ #- devel
+ # - milestone
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["stable-2.9", "stable-2.10", "stable-2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ steps:
+ # Run sanity tests inside a Docker container.
+ # The docker container has all the pinned dependencies that are
+ # required and all Python versions Ansible supports.
+ - name: Perform sanity testing
+ # See the documentation for the following GitHub action on
+ # https://github.com/ansible-community/ansible-test-gh-action/blob/main/README.md
+ uses: ansible-community/ansible-test-gh-action@release/v1
+ with:
+ ansible-core-version: ${{ matrix.ansible }}
+ #origin-python-version: 3.9
+ testing-type: sanity
+ # OPTIONAL If your sanity tests require code
+ # from other collections, install them like this
+ # test-deps: >-
+ # ansible.netcommon
+ # ansible.utils
+ # OPTIONAL If set to true, will test only against changed files,
+ # which should improve CI performance. See limitations on
+ # https://github.com/ansible-community/ansible-test-gh-action#pull-request-change-detection
+ pull-request-change-detection: true
+
+###
+# Unit tests (OPTIONAL)
+#
+# https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html
+
+ units:
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["stable-2.9", "stable-2.10", "stable-2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+    name: Units (Ⓐ${{ matrix.ansible }})
+ strategy:
+ # As soon as the first unit test fails, cancel the others to free up the CI queue
+ fail-fast: true
+ matrix:
+ ansible:
+        # The commented branches below are EOL; drop them unless your
+        # collection still needs to support them.
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ python:
+ - '3.9'
+ - '3.10'
+ - '3.11'
+      exclude:
+        # ansible-core 2.16 dropped controller support for Python 3.9.
+        - ansible: stable-2.16
+          python: '3.9'
+
+ steps:
+ - name: >-
+ Install paramiko dependency
+ run: pip install paramiko
+ - name: >-
+ Perform unit testing against
+ Ansible version ${{ matrix.ansible }}
+ under Python ${{ matrix.python }}
+ # See the documentation for the following GitHub action on
+ # https://github.com/ansible-community/ansible-test-gh-action/blob/main/README.md
+ uses: ansible-community/ansible-test-gh-action@release/v1
+ with:
+ ansible-core-version: ${{ matrix.ansible }}
+ target-python-version: ${{ matrix.python }}
+ testing-type: units
+ # OPTIONAL If your unit tests require code
+ # from other collections, install them like this
+ #test-deps: >-
+ # ansible.netcommon
+ # ansible.utils
+ # OPTIONAL If set to true, will test only against changed files,
+ # which should improve CI performance. See limitations on
+ # https://github.com/ansible-community/ansible-test-gh-action#pull-request-change-detection
+ pull-request-change-detection: true
+
+
+###
+# Integration tests (RECOMMENDED)
+#
+# https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html
+
+
+# If the application you are testing is available as a docker container and
+# you want to test multiple versions, see the following for an example:
+# https://github.com/ansible-collections/community.zabbix/tree/master/.github/workflows
+
+ integration:
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["stable-2.9", "stable-2.10", "stable-2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+    name: I (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
+ strategy:
+ fail-fast: false
+ matrix:
+ ansible:
+        # The commented branches below are EOL; drop them unless your
+        # collection still needs to support them.
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ #- devel
+ # - milestone
+ python:
+ - '3.9'
+ - '3.10'
+ - '3.11'
+      exclude:
+        # ansible-core 2.16 dropped controller support for Python 3.9.
+        - ansible: stable-2.16
+          python: '3.9'
+
+ steps:
+ - name: >-
+ Perform integration testing against
+ Ansible version ${{ matrix.ansible }}
+ under Python ${{ matrix.python }}
+ # See the documentation for the following GitHub action on
+ # https://github.com/ansible-community/ansible-test-gh-action/blob/main/README.md
+ uses: ansible-community/ansible-test-gh-action@release/v1
+ with:
+ ansible-core-version: ${{ matrix.ansible }}
+ # OPTIONAL command to run before invoking `ansible-test integration`
+ # pre-test-cmd:
+ target-python-version: ${{ matrix.python }}
+ testing-type: integration
+ # OPTIONAL If your integration tests require code
+ # from other collections, install them like this
+ test-deps: ansible.netcommon
+ # OPTIONAL If set to true, will test only against changed files,
+ # which should improve CI performance. See limitations on
+ # https://github.com/ansible-community/ansible-test-gh-action#pull-request-change-detection
+ pull-request-change-detection: true
+
+
+  check: # This job does nothing by itself; it gives branch protection and
+    # multi-stage CI a single job to depend on, e.g. to make sure that all
+    # tests pass before a publishing job is started.
+ if: always()
+
+ needs:
+ - sanity
+ - units
+ - integration
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Decide whether the needed jobs succeeded or failed
+ uses: re-actors/alls-green@release/v1
+ with:
+ jobs: ${{ toJSON(needs) }}
diff --git a/ansible_collections/ibm/storage_virtualize/.github/workflows/extra-docs-linting.yml b/ansible_collections/ibm/storage_virtualize/.github/workflows/extra-docs-linting.yml
new file mode 100644
index 000000000..beef22d72
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/.github/workflows/extra-docs-linting.yml
@@ -0,0 +1,34 @@
+name: Lint extra docsite docs and links
+on:
+  # Run CI against all pushes (direct commits, including merged PRs) and pull requests
+ push:
+ branches:
+ - main
+ - develop
+ pull_request:
+ # Run CI once per day (at 06:00 UTC)
+  # This ensures that even if there haven't been commits, we are still testing against the latest version of ansible-test for each ansible-base version
+ schedule:
+ - cron: '0 6 * * *'
+
+jobs:
+ docsite:
+ name: Lint extra docsite docs and links
+ permissions:
+ contents: read
+ runs-on: ubuntu-latest
+ steps:
+
+ - name: Check out code
+ uses: actions/checkout@v3
+
+ - name: Set up Python
+ uses: actions/setup-python@v3
+ with:
+ python-version: '3.10'
+
+ - name: Install antsibull-docs
+ run: pip install antsibull-docs --disable-pip-version-check
+
+ - name: Run collection docs linter
+ run: antsibull-docs lint-collection-docs .
diff --git a/ansible_collections/ibm/storage_virtualize/.github/workflows/galaxy-importer.yml b/ansible_collections/ibm/storage_virtualize/.github/workflows/galaxy-importer.yml
new file mode 100644
index 000000000..df88638a0
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/.github/workflows/galaxy-importer.yml
@@ -0,0 +1,47 @@
+name: ibm.storage_virtualize Ansible galaxy importer CI
+on:
+ push:
+ pull_request:
+ schedule:
+ - cron: '25 10 * * *'
+
+jobs:
+ galaxy-importer:
+    name: Validate storage_virtualize with the galaxy-importer tool
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+
+      - name: Set up Python 3
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.9
+
+ - name: Upgrade PIP
+ run: python3.9 -m pip install --upgrade pip
+
+ - name: Install ansible
+ run: python3.9 -m pip install ansible --disable-pip-version-check
+
+ - name: Install galaxy-importer tool
+ run: python3.9 -m pip install galaxy_importer --disable-pip-version-check
+
+ - name: Make directory to sync
+ run: |
+ pwd
+ mkdir -p ansible_collections/ibm/storage_virtualize
+ rsync -av . ansible_collections/ibm/storage_virtualize --exclude ansible_collections/ibm/storage_virtualize
+
+ - name: Build the tar package
+ run: ansible-galaxy collection build
+ working-directory: ./ansible_collections/ibm/storage_virtualize
+
+ - name: Run galaxy-importer tool on storage_virtualize
+ run: |
+ export GALAXY_IMPORTER_CONFIG=$(readlink -f galaxy-importer.cfg)
+ env | grep galaxy
+ python3.9 -m galaxy_importer.main ibm-storage_virtualize-*.tar.gz
+ working-directory: ./ansible_collections/ibm/storage_virtualize
diff --git a/ansible_collections/ibm/storage_virtualize/.vscode/extensions.json b/ansible_collections/ibm/storage_virtualize/.vscode/extensions.json
new file mode 100644
index 000000000..1450d869d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/.vscode/extensions.json
@@ -0,0 +1,5 @@
+{
+ "recommendations": [
+ "redhat.ansible"
+ ]
+}
diff --git a/ansible_collections/ibm/storage_virtualize/CHANGELOG.rst b/ansible_collections/ibm/storage_virtualize/CHANGELOG.rst
new file mode 100644
index 000000000..297a04071
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/CHANGELOG.rst
@@ -0,0 +1,5 @@
+Will be updated by antsibull-changelog. Do not edit this manually!
+
+See https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst for information on how to use antsibull-changelog.
+
+Check out ``changelogs/config.yaml`` for its configuration. You need to change at least the ``title`` field in there.
diff --git a/ansible_collections/ibm/storage_virtualize/CODE_OF_CONDUCT.md b/ansible_collections/ibm/storage_virtualize/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..0164155b8
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Community Code of Conduct
+
+Please see the official [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
diff --git a/ansible_collections/ibm/storage_virtualize/CONTRIBUTING.md b/ansible_collections/ibm/storage_virtualize/CONTRIBUTING.md
new file mode 100644
index 000000000..b4a17dec0
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/CONTRIBUTING.md
@@ -0,0 +1,3 @@
+# Contributing
+
+Refer to the [Contributing guidelines](https://github.com/ansible/community-docs/blob/main/contributing.rst).
diff --git a/ansible_collections/ibm/storage_virtualize/FILES.json b/ansible_collections/ibm/storage_virtualize/FILES.json
new file mode 100644
index 000000000..65786b52e
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/FILES.json
@@ -0,0 +1,1216 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "REVIEW_CHECKLIST.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91ad4aff2cc14b98f81fbe2d90609c5a69ed96b6d836387a9c697c1112e603c0",
+ "format": 1
+ },
+ {
+ "name": "codecov.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48a0722ba5dec1ebbc4658e2b837a166b9b0555e82aabc9140e7cdddbdc34f9c",
+ "format": 1
+ },
+ {
+ "name": ".vscode",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".vscode/extensions.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f72b83e1aa1301adb18ce0fe71ce6613d3cfb148f881b3e39c55359d41d3277f",
+ "format": 1
+ },
+ {
+ "name": "LICENSE",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d46c8a5b8c531dfcffbda3480e7a169f208fe32bd1c19f4a2d5563405ebe9e2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93e6532aa24bd6b494fb52a349736e1595e133e7b2c6ff8197dbf35c31879d69",
+ "format": 1
+ },
+ {
+ "name": "galaxy-importer.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7501d13dd591dda472fcdb8b9f44677a50ef86620f7756ba9c1196a41b2cd33c",
+ "format": 1
+ },
+ {
+ "name": "CODE_OF_CONDUCT.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14ec928234a7ed52bf8b458d31e4862335111e477e4dbe7fb543686c24115140",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e1c937c3dc5c2d23d1b0d62d47e92a04dbe0f04133c081f31db7beac7276da6",
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c638ad2658cbe7240e8017a455f6fa8c6b901d51559c50daee2f4a589456152",
+ "format": 1
+ },
+ {
+ "name": "playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "513ab8300365cd649cb76a3cac23f1d5614c23d9bcf1a46ef4ce408101d89e49",
+ "format": 1
+ },
+ {
+ "name": "playbooks/map_volume_to_host.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acf4d9f2db62edece3f30c5b5f342549cede43382bdd5aa463685e659c10dec9",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migrate.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "072192ba1e92fd79f4a09fa57ea1edc75e6b669d042e4c0a16a817db7fcfd677",
+ "format": 1
+ },
+ {
+ "name": "playbooks/initial_setup_system_complete.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfdb8203631aeaae41b9f43c83579faddb55cec54eac2da616da43e267555fef",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/Readme.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b915cecf935f0293f571d1c1a486f25b8da7f0ea225b6ff2042e6c2c66c78e05",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49dcbffe964be4cc854314f53287dde2b30f7046b21cb4e4d92e91650b6a60a7",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2df6729924786d9ab23ad363854b6bc3bb1f76f1c3a29da6b317112e6accc02c",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b8a0919915b3e762614fc58a9bc3386db37604b9fcb32a90268d53ebf3695be",
+ "format": 1
+ },
+ {
+ "name": "playbooks/create_GMCV_in_CG.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c18064cba84b7c335e88f40c5a222453ab9d10c32625cbdcd87f723eb7a4fb8d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration/README.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2c060618fd8bb6183164531f7cf93fe1ea1b90ed5ad0982555e6305ece05d92",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration/initiate_migration.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b414088cc7ee48a157daedd030e9eaf9656736acb9d623604c0939a63132fd07",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration/vol_migration_vars.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b58ea00ff9f04eadb5e20e399c01690891373f5291d03c23fe2b1ef196721970",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volume_migration/rescan_and_switch_paths.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1c691f210afc9817cbb2836e48a8313831cd34c5944e215da3f702030ceefc6",
+ "format": 1
+ },
+ {
+ "name": "playbooks/volumegrp_create.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c735d93dca1c675be2da9272aec310ffd837adb5d6a294a5c8982bda024316ae",
+ "format": 1
+ },
+ {
+ "name": "playbooks/generic_ansible_sample.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4be7dedfe61e219fd352d77a9ef60f71540c16490a24606c0ba800949878ad8b",
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/vars/target_cluster_vars",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d10f2c907189ee0ee9b046e00670512681a831b2810688438713c2aca68f881e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/vars/replication_vars",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3947041dfe9d8580f2541000232b2e58269bb3e4ae9646f97e8974be6981997",
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/vars/src_cluster_vars",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e983ae309c230a71524b80d1f27e741a6c98360ea703ab3fb263af745fdedcf",
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/extract_src_cluster_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "529535316794fa278c0b909ec86bbdde57328946e189619928d8dcf4f5c0ab0e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ced994d0e235222a8db3a5161b77f39e1f0d2986605584c6aadf13a76ce05493",
+ "format": 1
+ },
+ {
+ "name": "playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33235d5acdbd48f8715e4efb56db0a27993bd50c6a15b6193aaff0f20cb45872",
+ "format": 1
+ },
+ {
+ "name": "playbooks/security_mgmt.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "125b8f35f82615fa5fc54d4a28044330b3418dda76cc83fcebae9910262a32f9",
+ "format": 1
+ },
+ {
+ "name": "playbooks/generic_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "079ef1a3807799f4f101f320846e015929fffe5f1ee233bb6bbc5b6e905724dd",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4833e2900333e7a035d7e0f63f6d55777c2697476ee0a2f9bfcf250167c7571d",
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82854d0088f5a33247495393b516cea47b8c522131c0af4b7be755d75107af3d",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/extra-docs-linting.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90a3046683e29e867d30de1f80a9b9a211935ebc6df47ffaa964bb3a85200fc4",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/galaxy-importer.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd5cce0d04bee2e4359237fa30af8a89b6b9cf26f765ff43d3588d557d849559",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c415a86657f08b0a5a7f4114198b39d7d542cafce807f63e887fcc9796191ff",
+ "format": 1
+ },
+ {
+ "name": "requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66b9844c8fd55fcc7043ae89d3e5673e352f830e15dab46e84095f3066bd9b9f",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ibm_svc_ssh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f34a5e3b064801f496f5cef2d8f32be5b0a3c8a58c608c6128080a45715ab61",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ibm_svc_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59df9e5577af7d7dbe680447d03bbec401d59d7e0c1bf41e3f7b453d9f30a69e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_replicationgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e9ab72797ca6530d52cb1971c5e5c4d50512dfa69021444b6d46a9ec5aec091",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_complete_initial_setup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aeb3b99ff63bf3c2566907efb0b25aa690b656b24a2a1f49537ff92f6656cf49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_hostcluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "924ed8a86ec840b9a7f38fc4c28d0785d4e6054b0533de163f2d19f247aa75f0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_start_stop_replication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95ff5bc5d0dcc6c1b2bf309f59f4d8b65d5c1976c8e4732f1e0533ef2c3e0ae4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_mdiskgrp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbd7fb464ddffb0f35978e37a35bf6b272d5c214f2c827cbce6c791ca3445eeb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_security.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f09bc9dbbe97c4938aa24bfa106f9b2c630d2165e2a4eed6fbe9fbb60bda6c98",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_flashcopy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22660152552bd0ffa30eb416d9ccfe2466787ca68d588c3600ab9ca86d0ab125",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_cloud_backups.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1e7c48216a178ddd23db2bcd37b3285fc7d4680446fbe1e763bda56459f3c28",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "478cfab62593eb59ad4ffddb2415fabb796a69a8ec3007d58fa7c2449c7d84c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc63dac574a95c7c903cf3d564a875b9b6ec42abe13d62d7cba8c6a0a9e8d6ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_usergroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01c1764eb76d7a5b89948a294cbdb16ffb9fd44250a1c70e5dd349a7910e8e28",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_storage_partition.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2af284bc2b18ca2085b1ac142866e44154568657d3299ae27136947cd08ed573",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_syslog_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bb65497ea658e5d7a6328aeeb857938b83107350604790e325239aaf6f2a3c5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_provisioning_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cfb1a00ae10595a976b8e085b168b6c333531baf74002dc771c760124cf5c49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_fcportsetmember.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06927ab51e15daa9bdb2a279ca063996d93c17cc15ce0fadf9674f7278fe2298",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37b3c4554f2d877b9dcb7a22d512b0c8695febfb9a626abdc2e9c5b9f81953ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_callhome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35dc4f1b6173de83060235374fdba5622af4f7cd4a6d43939c7d64aca5e41b9d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_truststore_for_replication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2129e50c6d11fdd7757e93849e363ae6c5a1c244eb580755b104bc81fa6bc851",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_initial_setup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6df5099bbd03a163a64cf3b408870e3c0aab492696e321deeb6a9c1c4362a34a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d886f8899df599e9399bd6f32da3fa36d55f89a5ecd48551112e9c4a1b4ece44",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_replication_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ff482923492ba6dc44c1917d61eff75a8c374232e679ff4ce7ba0566fab0c23",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_vol_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd571b162fb6f3f412434db5dbaff2bd5e2203ae348ddf58b942be76621cccdc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "166c5da36a20ef2004792ae93103537bb6241ed2130c0279087ae547a45a6f05",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b62794f4782512aec7dcb155c6cc6991bde1d3472b0992414a42489c5f17c375",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_switch_replication_direction.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34834dc8be40f973e7f461daae9286e88eed580f6b91d57f37de4b519a546e5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svcinfo_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d9bca3a4dddeb0408990afd5d5af5b83fecc865a50f0b62815237c1f79d9c98",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_fc_partnership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f22f6fd2fcd7c98e560977befc2bfd5c90b3b2e2f9a5f2d2a9295ba6e513e638",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "053327533f48d1e820b782c6440eff9181381deffbefce883cc1c348c30fefb0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_portset.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3714a65f8a57534efa043dadec95ec85983e6794e59053e378ec42754f23cdeb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_cv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1672c005f02a06307581a4f7170f33f80bac82dca7cb1534ba52bfa5e38d0abc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_ip_partnership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edff8d78d7da05cdc62c06ea7012b5369b18abcc6d09d9428f4e0655a29a28ce",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_snapshotpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bac8513ae3f4d06979d3183a31a202d5061664c714cb968570055b1373d2810",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_sra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bbc0fe557ebba474978829fd589e774685e6dce12e54032606be6ae34219a15",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_safeguarded_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "699d0cd9d3f136e1343529991a12631d0f5d372889927ee3b435eea942568961",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_ownershipgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a77bec8e26bf854454119bda34f9cab649c0554dd12558ef7bc48c09900d3dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_mdisk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69430ccf9944963bea9daf1b1e9d24b289568ae20997d3d771e70ea813230844",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a75ea288176557ccb59da05a0efedf7fbed317daec51e1edc47dc622c585da23",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_volumegroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2e59b470d9474a17eaad0bd712fb6b3b5db13a6150facc349b28b9ef929b851",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_mirrored_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a449f76ff76cdabccc3ae25f355717dbbe7962c457901461cd3c102c5683228",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_ssl_certificate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b1a1f952a02d6dd512ca4e0fed514b0830c3483ebda5f682cd38e6c67854299",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_replication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "906651cde410ac28973f5097ba01efc738f264c52615eab225a82263f54cd1ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_manage_awss3_cloudaccount.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83d75f56e006138d34b15e1cc4701f7209b9949c46e4f3855398e257e636f926",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svctask_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc18ba5ce2bcea50072fe8aada9b82544d90b29ddfe96f18ce793fa53133de9c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_start_stop_flashcopy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac4116f66ca33f34a5c6ce4bfb7a858a649be07a20fbb370c15b5e90fadb23af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sv_restore_cloud_backup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a90c15b0cd33cb897a13488e93ccecc2dd3cd8a39b74c76dfb4e202542cd261d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_svc_manage_migration.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71aed780211dbb976b3c980cb897f9502a6040b9a66140f931248ca32557a299",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28abf5a25fedab47e8fe7069886c916675f50448fd29d0b527f65d9ba77336e8",
+ "format": 1
+ },
+ {
+ "name": "MAINTAINING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2435665a6562d5f3841fff1631970f95f0466c498e949d2b8579ccc2a0b810ad",
+ "format": 1
+ },
+ {
+ "name": "MAINTAINERS",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c53f932c3fe7d1ac0d9fe7672dc0671e31ef6c990d0fa9eb16803f92a2d3e3b5",
+ "format": 1
+ },
+ {
+ "name": "CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eec219dbc09446ed3e9938c8f66378621a0548b056426df33e3ea32e26bb4dc8",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6c34f1dcc709115c97390cfddf2b258266a54586a8e17847cb870e5fc2b2f04",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/.gitkeep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a3e8aecd17a3392ba9a189241e8570e93a6eef3acf3ef747e5504c138eb6071",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_ibm_svc_ssh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1ae6a65b963598745bace7be6a12acaa061e22fdfea10874d1447d11d4b0b74",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_ibm_svc_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "251e52729a6ff1f1d3753806ad14f01969dcb6b03cacd2a3d2bf765c967582c5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6693c5dbc213f9a1121958ff931a97d03b2eb2f1e4d7aef250b6939981294b22",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44b3f1bdc42bc9f37da27e4652faaa1a4b889b7b738ff2f89bf3d1bc83b5873b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c5cc0c3fa19300bf13857da5d04c2e47786bcf10ccda849edb2657b56ec24a6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e740057b968319878878046a5e6c6e77d37ffcbf345e2b40da9f6e08d13feb2e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22bc146f9dc26497bfaae0aec0fa32cd75f04b049fbb79ab9bb8d9d0a258beec",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_sra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bc0ba67126907948d919a60f69853149f43fae2fef34a19521f03259e5e784e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_hostcluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c833cdec08909995833d9433a3b35db691042a2e2e02b15672f3718cc02c7884",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad0f0150773a5154c46dea9e3c2ce4d614139e1fc05e5acb4fee8f9677960289",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c509b8d1897f33fd74cf30da4a4fd8b320a4e017ea5dccac4ac40a12737fff4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f02b852d412a5e2e350772592bd0f0393cbb8742452c4543cdbf52decb76b124",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7078bd9b0bf5d5c8ce5850910fb7652d4257764afd4a60ae67e3fd5b4f92dd64",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62e32d9800085b5e14a5de0cc13bc3948fe910010086b419914e725d5e9d4337",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b10264f24a887e68cea78315e4dc2bc67824973b1b3417bbb79da762054abea",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_initial_setup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39f21e6443c0dad7049c0ccd726a9f5e76664f23f01845c4e27e54dea2e2b505",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_storage_partition.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "094dd502c7c9cc9aa4d94c6d0a1b4c23ff22337a2865ceb0a9ccec200267bf52",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a9be6c9b5113a56868c45a2c3a5394522841eeee8da963e99d700925b65c84d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3828033b9b9ec6ce4d8e60698803bc51d526f8945ec7e9e3c892cf511ce5b1a5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77338d6e2dfb7f4fb35f74d657c67fbd6e99d8f2a838b6d11102686d0ade4ece",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df4627127a2afaca0a238198358a94e6078f32d5373162944aabe41d66496af8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3601ecfe522efc7c7ca576d11ff3d81177dae4f4e29779eee0e94361e74a8bf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "871f90b87ec2915dadf645e68e43982759d97c267b2770e9323a4f0a53739404",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80269a025427e222a60d45714817eccd5d424fd697a1b7d0060f300d4113de74",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_migration.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8c8c5f221a5220418d45acf7053dc25522181574d416f8b814f38efcf407685",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_replication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "115852ce5e5458cb1d70a098c138c46069d81fd7f8eef1035d43c96ee4aa2be2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48d632cffb1d6fe599071748b2a547e2841c41d67ace2fa53e9358e870ed55d0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_portset.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb7fa080a14a663ea0cf616ce6ae40061347eeda6c5b7f85b672693487215bfa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svcinfo_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff82813823670602776d9a0f62560c413e263bb4fc8279d2940953cb921d498c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dad00c77dfd1bf18c77207c42af9609898a213cb790ea14fa2ef103d2ae007f5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3eb828f0a87287626c01ab22268a335202cc3ba87d8918e9208fd2ac8f10d0d5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svctask_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cafcaf4dcc268440a1412d22684871c7f6ab147e7d99510b4e5e7d4d6981e34d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_mdisk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12024e33a7682b9c4631ed8cce702ebddbee984908b93e055c1764b8dc2d1328",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_cv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f05dc1853d5e1dabbe466309665a773e3ada84a34329c68673beaafa6eaa0684",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59b08ae3821816984213a308e34b58e576b519a5cfcf450c9ba8374e79f7fbf8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "caf2d0f99a5e68fb97bd740b797e70bc379c6926efe015f1607a79ad4c2d3c06",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_vol_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43ac16e29b99d3a04ef1c337f2767a2cdf380f8ac0248134c05a91e51e4e0ec5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6cd6a44b4d9d89a65e8d7304a7868988fdf3e19b3bf1534f9d120fd48d66b64",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ddc5a5640ea7ca42d83a4a9d9e5afb121fd5732c66acd7ccd682648f79297c1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b582bc3500e8e3083f91a1ae14c5d63d325269a9412673d6a028cfa62f267c49",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_syslog_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f4f6e8cff5cdbfe095b97454ef8c423686cd41e228c4dfc14001f62fc1d6088",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_security.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca91d989ff5be929945667e8b43a4acebc1a2d1c9d4592831fa56414cbca7fa7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d383cef260137ae2fa0bde7f558b7612fad4d03baa4ef1879e2fed329141ed11",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f13dd34e37f4ff7186a95ed4e2ecb6947d96d755bc0bda0db3412dc5977c573",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "403706226c4d55b5485b3883eb0d79dd3b179a66e8026c20b03db1142330e5fe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65c47063d5a495125cf6a2009a69d4928c6fbd8bc9bb9867fceb423c153e85d5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aec8007b506850a948756edd7fec5b2cdcb4158acacfabd4778d87b5eb47fb3e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9604da803964e66d4260f4b2598d319859d617844d57c912656111613a06209b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f5255cdac77d59146ed1c13708774bad757e1805198e73e3fb416a17908adad",
+ "format": 1
+ },
+ {
+ "name": "tests/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8bbb85dbed589969d508c46298ad61fdedee8fad8ffc34599f6acf75b968b75d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/.gitkeep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a3e8aecd17a3392ba9a189241e8570e93a6eef3acf3ef747e5504c138eb6071",
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f87eeae747030dd3b96a73e62a66701b8761c9970d69908a51883a9007525bcd",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
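
Each entry in FILES.json above pairs a relative path with the SHA-256 digest
of the file's raw contents; directories carry null checksums. All empty files
(changelogs/fragments/.keep, plugins/modules/__init__.py, MAINTAINERS) share
the digest e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,
the SHA-256 of zero bytes. A minimal sketch of how one such entry could be
computed (an illustrative helper, not code shipped in the collection):

import hashlib
import json
import os

def manifest_entry(path):
    """Build one FILES.json-style entry for a file or directory."""
    if os.path.isdir(path):
        return {"name": path, "ftype": "dir",
                "chksum_type": None, "chksum_sha256": None, "format": 1}
    with open(path, "rb") as fh:
        digest = hashlib.sha256(fh.read()).hexdigest()
    return {"name": path, "ftype": "file",
            "chksum_type": "sha256", "chksum_sha256": digest, "format": 1}

# An empty file reproduces the well-known empty-input digest listed above.
print(json.dumps(manifest_entry("changelogs/fragments/.keep"), indent=2))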
diff --git a/ansible_collections/ibm/storage_virtualize/LICENSE b/ansible_collections/ibm/storage_virtualize/LICENSE
new file mode 100644
index 000000000..f288702d2
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/ibm/storage_virtualize/MAINTAINERS b/ansible_collections/ibm/storage_virtualize/MAINTAINERS
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/MAINTAINERS
diff --git a/ansible_collections/ibm/storage_virtualize/MAINTAINING.md b/ansible_collections/ibm/storage_virtualize/MAINTAINING.md
new file mode 100644
index 000000000..9fad0d343
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/MAINTAINING.md
@@ -0,0 +1,3 @@
+# Maintaining this collection
+
+Refer to the [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
diff --git a/ansible_collections/ibm/storage_virtualize/MANIFEST.json b/ansible_collections/ibm/storage_virtualize/MANIFEST.json
new file mode 100644
index 000000000..c82930f7a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/MANIFEST.json
@@ -0,0 +1,34 @@
+{
+ "collection_info": {
+ "namespace": "ibm",
+ "name": "storage_virtualize",
+ "version": "2.3.1",
+ "authors": [
+ "Sumit Kumar Gupta (github.com/sumitguptaibm)",
+ "Lavanya C R (github.com/lavanyacr)",
+ "Sandip G Rajbanshi (github.com/Sandip-Rajbanshi)"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "storage",
+ "flashsystem",
+ "ibmsvc"
+ ],
+ "description": "Ansible Collections for IBM Storage Virtualize",
+ "license": [],
+ "license_file": "LICENSE",
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/ibm.storage_virtualize",
+ "documentation": null,
+ "homepage": "https://github.com/ansible-collections/ibm.storage_virtualize",
+ "issues": "https://github.com/ansible-collections/ibm.storage_virtualize/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e063b7a7443aaae0476bb07ca3acab886cb7530d1053a973914871f38cc83b90",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/ibm/storage_virtualize/README.md b/ansible_collections/ibm/storage_virtualize/README.md
new file mode 100644
index 000000000..bbe212495
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/README.md
@@ -0,0 +1,157 @@
+# Ansible Collection - ibm.storage_virtualize
+
+[![Code of conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+
+This collection provides a series of Ansible modules and plugins for interacting with the IBM Storage Virtualize family products. These products include the IBM SAN Volume Controller, IBM FlashSystem family members built with IBM Storage Virtualize (FlashSystem 5xxx, 7xxx, 9xxx), IBM Storwize family, and IBM Storage Virtualize for Public Cloud. For more information regarding these products, see [IBM Documentation](https://www.ibm.com/docs/).
+
+## Requirements
+
+- Ansible version 2.14 or higher
+- Python 3.9 or higher for controller nodes
+
+## Installation
+
+To install the IBM Storage Virtualize collection hosted in Galaxy:
+
+```bash
+ansible-galaxy collection install ibm.storage_virtualize
+```
+
+To upgrade to the latest version of the IBM Storage Virtualize collection:
+
+```bash
+ansible-galaxy collection install ibm.storage_virtualize --force
+```
+
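+To install a specific release, you can pin the version; 2.3.1 here matches this collection's MANIFEST.json and is only an example:
+
+```bash
+ansible-galaxy collection install ibm.storage_virtualize:2.3.1
+```
+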
+## Usage
+
+### Playbooks
+
+To use a module from the IBM Storage Virtualize collection, please reference the full namespace, collection name, and module name that you want to use:
+
+```yaml
+---
+- name: Using the IBM Storage Virtualize collection
+ hosts: localhost
+ tasks:
+ - name: Gather info from storage
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: x.x.x.x
+ domain:
+ username: username
+ password: password
+ log_path: /tmp/playbook.debug
+ gather_subset: all
+```
+
+Alternatively, you can add the full namespace and collection name in the `collections` element:
+
+```yaml
+---
+- name: Using the IBM Storage Virtualize collection
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ hosts: localhost
+ tasks:
+ - name: Gather info from storage
+ ibm_svc_info:
+ clustername: x.x.x.x
+ domain:
+ username: username
+ password: password
+ log_path: /tmp/playbook.debug
+ gather_subset: all
+```
+
+## Supported Resources
+
+### Modules
+
+- ibm_svc_auth - Generates an authentication token for a user on Storage Virtualize systems
+- ibm_svc_complete_initial_setup - Completes the initial setup configuration for LMC systems
+- ibm_svc_host - Manages hosts on Storage Virtualize systems
+- ibm_svc_hostcluster - Manages host cluster on Storage Virtualize systems
+- ibm_svc_info - Collects information on Storage Virtualize systems
+- ibm_svc_initial_setup - Manages initial setup configuration on Storage Virtualize systems
+- ibm_svc_manage_callhome - Manages configuration of Call Home feature on Storage Virtualize systems
+- ibm_svc_manage_consistgrp_flashcopy - Manages FlashCopy consistency groups on Storage Virtualize systems
+- ibm_svc_manage_cv - Manages the change volume in remote copy replication on Storage Virtualize systems
+- ibm_svc_manage_flashcopy - Manages FlashCopy mappings on Storage Virtualize systems
+- ibm_svc_manage_ip - Manages IP provisioning on Storage Virtualize systems
+- ibm_svc_manage_migration - Manages volume migration between clusters on Storage Virtualize systems
+- ibm_svc_manage_mirrored_volume - Manages mirrored volumes on Storage Virtualize systems
+- ibm_svc_manage_ownershipgroup - Manages ownership groups on Storage Virtualize systems
+- ibm_svc_manage_portset - Manages IP portset on Storage Virtualize systems
+- ibm_svc_manage_replication - Manages remote copy replication on Storage Virtualize systems
+- ibm_svc_manage_replicationgroup - Manages remote copy consistency groups on Storage Virtualize systems
+- ibm_svc_manage_safeguarded_policy - Manages safeguarded policy configuration on Storage Virtualize systems
+- ibm_svc_manage_sra - Manages the remote support assistance configuration on Storage Virtualize systems
+- ibm_svc_manage_user - Manages user on Storage Virtualize systems
+- ibm_svc_manage_usergroup - Manages user groups on Storage Virtualize systems
+- ibm_svc_manage_volume - Manages standard volumes on Storage Virtualize systems
+- ibm_svc_manage_volumegroup - Manages volume groups on Storage Virtualize systems
+- ibm_svc_mdisk - Manages MDisks for Storage Virtualize systems
+- ibm_svc_mdiskgrp - Manages pools for Storage Virtualize systems
+- ibm_svc_start_stop_flashcopy - Starts or stops FlashCopy mapping and consistency groups on Storage Virtualize systems
+- ibm_svc_start_stop_replication - Starts or stops remote-copy independent relationships or consistency groups on Storage Virtualize systems
+- ibm_svc_vol_map - Manages volume mapping for Storage Virtualize systems
+- ibm_svcinfo_command - Runs svcinfo CLI command on Storage Virtualize systems over SSH session
+- ibm_svctask_command - Runs svctask CLI command(s) on Storage Virtualize systems over SSH session
+- ibm_sv_manage_awss3_cloudaccount - Manages Amazon S3 cloud account configuration on Storage Virtualize systems
+- ibm_sv_manage_cloud_backup - Manages cloud backups on Storage Virtualize systems
+- ibm_sv_manage_fc_partnership - Manages Fibre Channel (FC) partnership on Storage Virtualize systems
+- ibm_sv_manage_fcportsetmember - Manages addition or removal of ports from the Fibre Channel (FC) portsets on Storage Virtualize systems
+- ibm_sv_manage_ip_partnership - Manages IP partnership configuration on Storage Virtualize systems
+- ibm_sv_manage_provisioning_policy - Manages provisioning policy configuration on Storage Virtualize systems
+- ibm_sv_manage_replication_policy - Manages policy-based replication configuration on Storage Virtualize systems
+- ibm_sv_manage_security - Manages configuration of security options on IBM Storage Virtualize family storage systems
+- ibm_sv_manage_snapshot - Manages snapshots (mutual consistent images of a volume) on Storage Virtualize systems
+- ibm_sv_manage_snapshotpolicy - Manages snapshot policy configuration on Storage Virtualize systems
+- ibm_sv_manage_ssl_certificate - Exports an existing system certificate on to Storage Virtualize systems
+- ibm_sv_manage_storage_partition - Manages storage partition configuration on Storage Virtualize systems
+- ibm_sv_manage_syslog_server - Manages syslog server configuration on Storage Virtualize systems
+- ibm_sv_manage_truststore_for_replication - Manages certificate trust stores for replication on Storage Virtualize family systems
+- ibm_sv_restore_cloud_backup - Restores cloud backups on Storage Virtualize systems
+- ibm_sv_switch_replication_direction - Switches the replication direction on Storage Virtualize systems
+
+### Other Feature Information
+- SV Ansible Collection v1.8.0 provides the new 'ibm_svc_complete_initial_setup' module to complete the automation of Day 0 configuration on Licensed Machine Code (LMC) systems.
+  For non-LMC systems, logging in to the user interface is required in order to complete the automation of Day 0 configuration.
+- SV Ansible Collection v1.7.0 provided `Setup and Configuration Automation` through different modules. This feature helps users automate Day 0 configuration.
+ This feature includes three modules:
+ - ibm_svc_initial_setup
+ - ibm_svc_manage_callhome
+ - ibm_svc_manage_sra
+- By proceeding and using these modules, the user acknowledges that the [IBM Privacy Statement](https://www.ibm.com/privacy) has been read and understood.
+
+### Prerequisite
+
+- Paramiko must be installed to use the ibm_svctask_command and ibm_svcinfo_command modules (see the example below).
+
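+A minimal sketch, assuming pip manages the controller's Python environment (Paramiko is published on PyPI as `paramiko`):
+
+```bash
+pip install paramiko
+```
+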
+## Limitation
+
+The modules in the IBM Storage Virtualize Ansible collection leverage REST APIs to connect to the IBM Storage Virtualize system. This has the following limitations:
+1. Using the REST APIs to list more than 2000 objects may cause a loss of service from the API side, as it automatically restarts due to memory constraints.
+2. It is not possible to access the REST APIs using an IPv6 address on a cluster.
+3. The Ansible collection can run on all IBM Storage Virtualize system versions above 8.1.3, except versions 8.3.1.3, 8.3.1.4, and 8.3.1.5.
+4. At the time of release of the SV Ansible v1.8.0 collection, no module is available for non-LMC systems to automate license agreement acceptance, including the EULA.
+   Users will be presented with a GUI setup wizard upon user interface login, whether or not the Ansible modules have been used for initial configuration.
+
+
+## Releasing, Versioning, and Deprecation
+
+1. IBM Storage Virtualize Ansible Collection releases follow a quarterly release cycle.
+2. IBM Storage Virtualize Ansible Collection releases follow [semantic versioning](https://semver.org/).
+3. IBM Storage Virtualize Ansible modules deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
+
+## Contributing
+
+Currently, we are not accepting community contributions.
+However, you may periodically review this content to learn when and how contributions can be made in the future.
+IBM Storage Virtualize Ansible Collection maintainers can follow the [Maintainer guidelines](https://docs.ansible.com/ansible/devel/community/maintainers.html).
+
+## License
+
+GNU General Public License v3.0
diff --git a/ansible_collections/ibm/storage_virtualize/REVIEW_CHECKLIST.md b/ansible_collections/ibm/storage_virtualize/REVIEW_CHECKLIST.md
new file mode 100644
index 000000000..9dccf7ef1
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/REVIEW_CHECKLIST.md
@@ -0,0 +1,3 @@
+# Review Checklist
+
+Refer to the [Collection review checklist](https://github.com/ansible/community-docs/blob/main/review_checklist.rst).
diff --git a/ansible_collections/ibm/storage_virtualize/changelogs/changelog.yaml b/ansible_collections/ibm/storage_virtualize/changelogs/changelog.yaml
new file mode 100644
index 000000000..ef620673a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/changelogs/changelog.yaml
@@ -0,0 +1,60 @@
+releases:
+ 2.0.0:
+ release_date: '2023-06-30'
+ changes:
+ minor_changes:
+ - ibm_svc_mdisk - Added support for Distributed Arrays (DRAID).
+ - ibm_svc_manage_flashcopy - Added support for backup type snapshots.
+ - ibm_svc_manage_volumegroup - Added support to rename an existing volume group.
+ bugfixes:
+ - ibm_svc_manage_volume - Allow adding hyperswap volume to a volume group.
+ 2.1.0:
+ release_date: '2023-09-29'
+ changes:
+ minor_changes:
+ - ibm_svc_host - Added support to associate/deassociate volume group with a storage partition.
+ - ibm_svc_info - Added support to display current security settings.
+      - ibm_svc_manage_volumegroup - Added support to associate/deassociate volume group with a storage partition.
+ - ibm_sv_manage_replication_policy - Added support to configure a 2-site-ha policy.
+ bugfixes:
+ release_summary: Introduced two new modules. Added support for syslog server management and storage partition.
+ modules:
+ - description: Manages security settings on Storage Virtualize system related to SSH protocol and password-related
+ configuration
+ name: ibm_sv_manage_security
+ namespace: ''
+ - description: Manages storage partition on Storage Virtualize system used for policy based High Availability
+ name: ibm_sv_manage_storage_partition
+ namespace: ''
+ - description: Manages syslog server configuration on Storage Virtualize system
+ name: ibm_sv_manage_syslog_server
+ namespace: ''
+ 2.2.0:
+ release_date: '2023-12-29'
+ changes:
+ minor_changes:
+ - ibm_svc_info - Added support to display information about thinclone/clone volumes and volumegroups.
+ - ibm_svc_host - Added support to create nvmetcp host.
+      - ibm_svc_manage_volumegroup - Added support to delete volumegroups keeping volumes via 'evictvolumes'.
+ - ibm_sv_manage_snapshot - Added support to restore entire volumegroup from a snapshot of that volumegroup.
+ - ibm_sv_manage_replication_policy - Added support to configure a 2-site-ha policy.
+ bugfixes:
+      release_summary: Added support for restoring volumegroups from snapshot, creating NVMeTCP host, and features
+        (evictvolumes, retentionminutes, volume and volumegroup information) for thincloned/cloned volumes and
+        volumegroups.
+ 2.3.0:
+ release_date: '2024-03-29'
+ changes:
+ minor_changes:
+      - ibm_svc_info - Added support to display information about partition, quorum, IO group, VG replication,
+        enclosure, SNMP server and LDAP server
+ - ibm_svc_manage_volume - Added support to create clone or thinclone from snapshot
+      - ibm_svc_manage_volumegroup - Added support to create clone or thinclone volumegroup from a snapshot, using a
+        subset of volumes
+      - ibm_sv_manage_snapshot - Added support to restore a subset of volumes of a volumegroup from a snapshot
+ bugfixes:
+ - ibm_svc_info - Command and release mapping to remove errors in gather_subset=all
+ - ibm_svc_info - Return error in listing entities that require object name
+
+      release_summary: Added support for restoring a set of volumes from snapshot, clone and thinclone management,
+        and a feature-to-release mapping for SVC entities.
diff --git a/ansible_collections/ibm/storage_virtualize/changelogs/config.yaml b/ansible_collections/ibm/storage_virtualize/changelogs/config.yaml
new file mode 100644
index 000000000..08c3ba7cc
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/changelogs/config.yaml
@@ -0,0 +1,29 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: CHANGE THIS IN changelogs/config.yaml!
+trivial_section_name: trivial
diff --git a/ansible_collections/ibm/storage_virtualize/changelogs/fragments/.keep b/ansible_collections/ibm/storage_virtualize/changelogs/fragments/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/changelogs/fragments/.keep
diff --git a/ansible_collections/ibm/storage_virtualize/codecov.yml b/ansible_collections/ibm/storage_virtualize/codecov.yml
new file mode 100644
index 000000000..6d374ebe7
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/codecov.yml
@@ -0,0 +1,6 @@
+# CHANGE THIS
+fixes:
+ - "/ansible_collections/NAMESPACE/COLLECTION/::"
+ignore:
+ - "**/test_*.py"
+ - "**/playbooks" \ No newline at end of file
diff --git a/ansible_collections/ibm/storage_virtualize/docs/docsite/links.yml b/ansible_collections/ibm/storage_virtualize/docs/docsite/links.yml
new file mode 100644
index 000000000..d760eb597
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/docs/docsite/links.yml
@@ -0,0 +1,45 @@
+---
+# This will make sure that plugin and module documentation gets Edit on GitHub links
+# that allow users to directly create a PR for this plugin or module in GitHub's UI.
+# Remove this section if the collection repository is not on GitHub, or if you do not want this
+# functionality for your collection.
+edit_on_github:
+ repository: ansible-collections/community.REPO_NAME
+ branch: main
+ # If your collection root (the directory containing galaxy.yml) does not coincide with your
+ # repository's root, you have to specify the path to the collection root here. For example,
+ # if the collection root is in a subdirectory ansible_collections/community/REPO_NAME
+ # in your repository, you have to set path_prefix to 'ansible_collections/community/REPO_NAME'.
+ path_prefix: ''
+
+# Here you can add arbitrary extra links. Please keep the number of links down to a
+# minimum! Also please keep the description short, since this will be the text put on
+# a button.
+#
+# Also note that some links are automatically added from information in galaxy.yml.
+# The following are automatically added:
+# 1. A link to the issue tracker (if `issues` is specified);
+# 2. A link to the homepage (if `homepage` is specified and does not equal the
+# `documentation` or `repository` link);
+# 3. A link to the collection's repository (if `repository` is specified).
+
+extra_links:
+ - description: Report an issue
+ url: https://github.com/ansible-collections/community.REPO_NAME/issues/new/choose
+
+# Specify communication channels for your collection. We suggest to not specify more
+# than one place for communication per communication tool to avoid confusion.
+communication:
+ matrix_rooms:
+ - topic: General usage and support questions
+ room: '#users:ansible.im'
+ irc_channels:
+ - topic: General usage and support questions
+ network: Libera
+ channel: '#ansible'
+ mailing_lists:
+ - topic: Ansible Project List
+ url: https://groups.google.com/g/ansible-project
+ # You can also add a `subscribe` field with an URI that allows to subscribe
+ # to the mailing list. For lists on https://groups.google.com/ a subscribe link is
+ # automatically generated.
diff --git a/ansible_collections/ibm/storage_virtualize/galaxy-importer.cfg b/ansible_collections/ibm/storage_virtualize/galaxy-importer.cfg
new file mode 100644
index 000000000..767bd47a4
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/galaxy-importer.cfg
@@ -0,0 +1,2 @@
+[galaxy-importer]
+RUN_FLAKE8 = True \ No newline at end of file
diff --git a/ansible_collections/ibm/storage_virtualize/meta/execution-environment.yml b/ansible_collections/ibm/storage_virtualize/meta/execution-environment.yml
new file mode 100644
index 000000000..ea2e299da
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/meta/execution-environment.yml
@@ -0,0 +1,12 @@
+---
+version: 1
+
+build_arg_defaults:
+ ANSIBLE_GALAXY_CLI_COLLECTION_OPTS: "-v"
+
+ # ansible_config: '/etc/ansible/ansible.cfg'
+
+dependencies:
+ galaxy: requirements.yml
+ python: requirements.txt
+# system: bindep.txt
diff --git a/ansible_collections/ibm/storage_virtualize/meta/runtime.yml b/ansible_collections/ibm/storage_virtualize/meta/runtime.yml
new file mode 100644
index 000000000..be99ccf4b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.14.0'
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md
new file mode 100644
index 000000000..ccc441f09
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/README.md
@@ -0,0 +1,88 @@
+# FlashSystem Configuration Replication using Ansible
+
+**Objective:**
+Replicate the system configuration from one FlashSystem to another.
+
+**Prerequisite:**
+- IBM Storage Virtualize Ansible collection version 2.2.0 or above must be installed
+
+**Features:**
+- Set System name
+- Set up NTP server
+- Set up Timezone
+- Set up DNS server
+- Create Ownership groups
+- Create Usergroups
+- Create Users
+
+**Details about files:**
+
+1. src_cluster_vars:
+   This file stores the source cluster credentials, defines the settable_fields to extract from the source cluster for each entity, and formats the raw extracted data to generate the replication_vars file that is provided as input to the replicate_config_on_target_cluster.yml playbook.
+   Fields to be set by the user (a minimal sketch follows this item):
+   cluster_ip: Cluster IP of the source system
+   cluster_username: Username of the source system
+   cluster_password: Password of the source system
+
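+   A minimal sketch of the vars/src_cluster_vars credentials (values are placeholders; the real file also defines the settable_fields used for extraction, omitted here):
+   ```
+   cluster_ip: x.x.x.x
+   cluster_username: username
+   cluster_password: password
+   ```
+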
+2. extract_src_cluster_config.yml:
+   This playbook takes the src_cluster_vars file as input, gathers the cluster configuration, and writes it to the replication_vars file in a format that can be consumed by the replicate_config_on_target_cluster.yml playbook for replication.
+ To run this playbook:
+ ```
+ ansible-playbook extract_src_cluster_config.yml
+ ```
+
+3. target_cluster_vars:
+   This file stores the target cluster credentials, which can be encrypted. A minimal sketch of this file follows this item.
+   Fields to be set by the user:
+   cluster_ip: Cluster IP of the target system
+   cluster_username: Username of the target system
+   cluster_password: Password of the target system
+   user_default_password: Default password to be set for newly created users
+   >IMPORTANT:
+   The user_default_password value must comply with the FlashSystem password policy.
+   To encrypt the target_cluster_vars file:
+ ```
+ ansible-vault encrypt target_cluster_vars
+ ```
+   This command will prompt you to set a password. Note the password entered at the prompt, as it will be required when running the playbooks.
+   To open or edit this file later:
+ ```
+ ansible-vault edit target_cluster_vars
+ ```
+
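+   A minimal sketch of the target_cluster_vars file, assuming the four fields above (values are placeholders; choose a user_default_password that satisfies the password policy):
+   ```
+   cluster_ip: x.x.x.x
+   cluster_username: username
+   cluster_password: password
+   user_default_password: password
+   ```
+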
+4. replicate_config_on_target_cluster.yml:
+   Run this playbook after running the extract_src_cluster_config.yml playbook. It takes the replication_vars file generated by extract_src_cluster_config.yml as input and configures the target system accordingly.
+   Note: If the target_cluster_vars file has been encrypted, use the following command to run the replicate_config_on_target_cluster.yml playbook:
+ ```
+ ansible-playbook replicate_config_on_target_cluster.yml --ask-vault-pass
+ ```
+ Enter password used in previous step to encrypt target_cluster_vars file
+ >Note:
+ This playbook sets default password for users created on target system, user will be asked to change this default password on next login.
+ >IMPORTANT:
+ If superuser is also being replicated, use the default password for next login.
+
+**Usage:**
+1. Replicate from one system to another exactly:
+- Set source cluster details in src_cluster_vars
+- Run playbook extract_src_cluster_config.yml
+- Set target cluster details in target_cluster_vars
+- Run playbook replicate_config_on_target_cluster.yml
+
+2. Replicate from one system to another with a few modifications:
+- Set source cluster details in src_cluster_vars
+- Run playbook extract_src_cluster_config.yml
+- Edit config fields to be modified in replication_vars file
+- Set target cluster details in target_cluster_vars
+- Run playbook replicate_config_on_target_cluster.yml
+
+3. Set up a system by customising the config:
+- Set config fields in replication_vars file
+- Set target cluster details in target_cluster_vars
+- Run playbook replicate_config_on_target_cluster.yml
+
+**Authors:**
+Ajinkya Nanavati (ananava1@in.ibm.com)
+Devendra Mahajan (demahaj1@in.ibm.com)
+Mohit Chitlange (mochitla@in.ibm.com)
+Vrinda Dhakad (vrinda.dhakad@ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml
new file mode 100644
index 000000000..86be57f1b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/extract_src_cluster_config.yml
@@ -0,0 +1,97 @@
+- name: Using Storage Virtualize collection to extract source cluster config
+ hosts: localhost
+ vars_files:
+ - vars/src_cluster_vars
+ vars:
+ src_file: "vars/src"
+ dest_file: "vars/replication_vars"
+
+ collections:
+ - ibm.storage_virtualize
+
+ gather_facts: no
+ connection: local
+ tasks:
+
+ - name: Fetch authorization token for source
+ register: svc_token
+ ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+ - name: Get details of the cluster
+ register: sysinfo
+ ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ svc_token.token }}"
+ gather_subset: [system, dnsserver, ownershipgroup, usergroup, user, emailserver, emailuser]
+ log_path: /tmp/sysinfo.debug
+
+ - name: Define variables
+ set_fact:
+ settable_system_info: {}
+ settable_info_tasks:
+ DnsServer: settable_dns_info
+ Ownershipgroup: settable_ownershipgrp_info
+ UserGrp: settable_usergrp_info
+ User: settable_user_info
+
+ - name: Get settable system parameters from list
+ set_fact:
+ settable_system_info: "{{ settable_system_info | combine({item: sysinfo.System[item]}) }}"
+ loop: "{{settable_fields.System_fields}}"
+ when: sysinfo.System[item] is defined
+
+ - name: Get settable parameters from list
+ set_fact:
+ "{{ item.value }}": "{{ sysinfo[item.key] | json_query(query) }}"
+ vars:
+ query: "[*].{ {% for field in settable_fields[item.key ~ '_fields'] %} {{ field }}: {{ field }}{% if not loop.last %},{% endif %}{% endfor %} }"
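+      # Illustration: with DnsServer_fields [name, IP_address] from src_cluster_vars,
+      # the query above renders to: [*].{ name: name, IP_address: IP_address }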
+ loop: "{{ settable_info_tasks | dict2items }}"
+ when: sysinfo[item.key] is defined
+
+ - name: Get current timestamp
+ command: "date '+%Y%m%d%H%M%S'"
+ register: timestamp_output
+
+ - name: Add timestamp to the temp file
+ set_fact:
+ src_file: "{{ src_file }}_{{ timestamp_output.stdout }}"
+
+ - name: Create empty file
+ file:
+ path: "{{ src_file }}"
+ state: touch
+ force: yes
+ register: src_creation
+
+ - name: Write content into file
+ lineinfile:
+ path: "{{ src_file }}"
+ line: "System: {{settable_system_info }} \nDnsServer: {{settable_dns_info}} \nOwnershipGroup: {{settable_ownershipgrp_info}} \nUserGrp: {{settable_usergrp_info}} \nUser: {{settable_user_info}}"
+
+ - name: Read file content
+ register: file_data
+ slurp:
+ src: "{{ src_file }}"
+
+ - name: Modify file content
+ set_fact:
+ modified_content: "{{ file_data.content | b64decode }}"
+
+ - name: Search and replace strings
+ loop: "{{ search_replace_pairs }}"
+ set_fact:
+ modified_content: "{{ modified_content | regex_replace(item.search, item.replace) }}"
+
+ - name: Write modified content to destination file
+ copy:
+ content: "{{ modified_content }}"
+ dest: "{{ dest_file }}"
+ force: yes
+
+ - name: Deleting temporary source file
+ file:
+ path: "{{ src_file }}"
+ state: absent
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml
new file mode 100644
index 000000000..3bd5d0fa9
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/replicate_config_on_target_cluster.yml
@@ -0,0 +1,71 @@
+- name: Using Storage Virtualize collection to replicate system
+ hosts: localhost
+ vars_files:
+ - vars/replication_vars
+ - vars/target_cluster_vars
+
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+
+ tasks:
+ - name: Fetch authorization token for target cluster
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+ - name: Initial cluster configuration on FlashSystem
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+
+ - name: Setup NTP server
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ ntpip: "{{ System.cluster_ntp_IP_address if System.cluster_ntp_IP_address is defined }}"
+
+ - name: Setup time zone
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ timezone: "{{ System.time_zone | regex_search('^[^\\s]+') if System.time_zone is defined }}"
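+      # time_zone is stored as "<id> <name>" (e.g. "200 IST" in replication_vars); the regex keeps only the leading id token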
+
+ - name: Setup DNS server
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ dnsname: "{{ DnsServer | map (attribute='name') | list }}"
+ dnsip: "{{ DnsServer | map (attribute='IP_address') | list }}"
+
+ - name: Create Ownership group
+ ibm.storage_virtualize.ibm_svc_manage_ownershipgroup:
+ name: "{{ item.name }}"
+ state: present
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ loop: "{{OwnershipGroup | default([], true) }}"
+
+ - name: Create Usergroups
+ ibm.storage_virtualize.ibm_svc_manage_usergroup:
+ name: "{{ item.name }}"
+ state: present
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ role: "{{item.role }}"
+ ownershipgroup: "{{item.owner_name }}"
+ loop: "{{ UserGrp | default([], true) }}"
+
+ - name: Create Users with password
+ ibm.storage_virtualize.ibm_svc_manage_user:
+ name: "{{ item.name }}"
+ state: present
+ clustername: "{{ cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ usergroup: "{{ item.usergrp_name }}"
+ user_password: "{{ user_default_password }}"
+ forcepasswordchange: true
+ auth_type: usergrp
+ loop: "{{ User | default([], true) }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars
new file mode 100644
index 000000000..92b6eee26
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/replication_vars
@@ -0,0 +1,52 @@
+# This file is auto-generated by playbook extract_src_cluster_config.yml
+System:
+ time_zone: 200 IST
+ cluster_ntp_IP_address: x.x.x.x # NTP server IP address automatically populated from playbook
+ cluster_isns_IP_address:
+
+DnsServer:
+- name: dnsserver1
+ IP_address: y.y.y.y
+
+- name: dnsserver2
+ IP_address: z.z.z.z
+
+OwnershipGroup:
+- name: ownershipgroup0
+
+- name: ownershipgroup1
+
+UserGrp:
+- name: SecurityAdmin
+ role: SecurityAdmin
+ owner_name:
+
+
+- name: Administrator
+ role: Administrator
+ owner_name:
+
+
+- name: CopyOperator
+ role: CopyOperator
+ owner_name:
+
+
+- name: Service
+ role: Service
+ owner_name:
+
+
+- name: Monitor
+ role: Monitor
+ owner_name:
+
+
+- name: RestrictedAdmin
+ role: RestrictedAdmin
+ owner_name:
+
+User:
+- name: superuser
+ usergrp_name: SecurityAdmin
+
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars
new file mode 100644
index 000000000..2b7c60b64
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/src_cluster_vars
@@ -0,0 +1,42 @@
+cluster_ip: x.x.x.x # Cluster IP of source system
+cluster_username: username # Username of source system
+cluster_password: password # Password of source system
+
+search_replace_pairs:
+ - search: "{"
+ replace: "\n"
+ - search: "}"
+ replace: "\n"
+ - search: ","
+ replace: "\n "
+ - search: "'"
+ replace: ""
+ - search: "\\["
+ replace: ""
+ - search: "\\]"
+ replace: ""
+ - search: "name"
+ replace: "- name"
+ - search: "_- name"
+ replace: "_name"
+ - search: "time_zone"
+ replace: " time_zone"
+
+settable_fields:
+ System_fields:
+ - time_zone
+ - cluster_ntp_IP_address
+ - cluster_isns_IP_address
+ DnsServer_fields:
+ - name
+ - IP_address
+ Ownershipgroup_fields:
+ - name
+ UserGrp_fields:
+ - name
+ - role
+ - owner_name
+ User_fields:
+ - name
+ - usergrp_name
+
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars
new file mode 100644
index 000000000..e9d6fabab
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/cluster_config_replication/vars/target_cluster_vars
@@ -0,0 +1,4 @@
+cluster_ip: x.x.x.x # Cluster IP of target system
+cluster_username: username # Username of target system
+cluster_password: password # Password of target system
+user_default_password: new_password # Default password for users created on target system
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml b/ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml
new file mode 100644
index 000000000..d1c841a3a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/create_GMCV_in_CG.yml
@@ -0,0 +1,119 @@
+---
+- name: Using IBM Storage Virtualize collection to create rc consistency group
+ hosts: localhost
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ vars:
+ - auxcluster: x.x.x.x
+ - ausername: ausername
+ - apassword: apassword
+ - clustername: clustername
+ - username: username
+ - password: password
+ - cgname: Group_cg11
+ - remotecluster: Cluster_x.x.x.x
+ - masterpool: site1pool1
+ - mastervol: master
+ - relname: scopy5
+    - auxvol: auxvol
+    - auxpool: auxpool          # pool for the aux (target) volume
+    - mastervolcv: mastervolcv  # change volume for the master volume
+    - auxvolcv: auxvolcv        # change volume for the aux volume
+ connection: local
+ tasks:
+ - name: Fetch authorization token for aux
+ register: auth
+ ibm_svc_auth:
+ clustername: "{{auxcluster}}"
+ username: "{{ausername}}"
+ password: "{{apassword}}"
+ - name: create target volume
+ ibm_svc_manage_volume:
+ clustername: "{{ auxcluster }}"
+ token: "{{auth.token}}"
+ pool: "{{auxpool}}"
+ name: "{{auxvol}}"
+ size: 10
+ unit: "gb"
+ state: present
+ - name: Fetch authorization token for master
+ register: results
+ ibm_svc_auth:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ - name: create remote copy cg
+ ibm_svc_manage_replicationgroup:
+ name: "{{cgname}}"
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ - name: Create source volume
+ ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ pool: "{{masterpool}}"
+ name: "{{mastervol}}"
+ size: 1
+ unit: "gb"
+ state: present
+ - name: create MM remote copy
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: metro
+ sync: true
+ consistgrp: "{{cgname}}"
+ - name: remove the remote copy from CG
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: metro
+ noconsistgrp: true
+ - name: Convert MM to GM
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: global
+ - name: Convert GM to GMCV
+ ibm_svc_manage_replication:
+ name: "{{relname}}"
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: "{{mastervol}}"
+ aux: "{{auxvol}}"
+ copytype: GMCV
+ consistgrp: "{{cgname}}"
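+    # GMCV requires change volumes on both sides; the next two tasks create and attach them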
+ - name: Create/attach master change volume
+ ibm_svc_manage_cv:
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ state: present
+ rname: "{{relname}}"
+ cvname: "{{ mastervolcv }}"
+ basevolume: "{{ mastervol }}"
+ - name: Create/attach aux change volume
+ ibm_svc_manage_cv:
+ clustername: "{{ auxcluster }}"
+ token: "{{auth.token}}"
+ state: present
+ rname: "{{relname}}"
+ cvname: "{{ auxvolcv }}"
+ basevolume: "{{ auxvol }}"
+ ismaster: false
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml b/ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml
new file mode 100644
index 000000000..83a19ccda
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/generic_ansible_sample.yaml
@@ -0,0 +1,34 @@
+---
+- name: Using the IBM Storage Virtualize collection
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ hosts: localhost
+ vars:
+ - clustername: x.x.x.x
+ - username: username
+ - password: password
+ - volname: vol0
+ - pool: pool0
+ - easy_tier: "off"
+ - size: 1
+ - unit: gb
+ tasks:
+ - name: Send CLI command over ssh connection
+ ibm_svctask_command:
+ command: [
+ "svctask mkvdisk -name {{ volname }} -mdiskgrp '{{ pool }}' -easytier '{{ easy_tier }}' -size {{ size }} -unit {{ unit }}",
+ "svctask rmvdisk {{ volname }}"
+ ]
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+    - name: Send CLI query over ssh connection
+ ibm_svcinfo_command:
+ command: "svcinfo lsvdisk"
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml b/ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml
new file mode 100644
index 000000000..1488bcded
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/generic_info.yml
@@ -0,0 +1,24 @@
+---
+- name: Using the IBM Storage Virtualize collection
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ hosts: localhost
+ vars:
+ - user: username
+ - clustername: x.x.x.x
+ - username: username
+ - password: password
+ tasks:
+ - name: Run CLI commands
+ register: results
+ ibm_svcinfo_command:
+ command: "svcinfo lssystem"
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/test.debug
+ - name: show time zone in lssystem
+ set_fact:
+ time_zone: "{{ (results['stdout'] | from_json).time_zone }}"
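+    - name: Show the extracted time zone (illustrative)
+      debug:
+        msg: "{{ time_zone }}"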
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml b/ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml
new file mode 100644
index 000000000..96e78bf2d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/initial_setup_system_complete.yml
@@ -0,0 +1,74 @@
+- name: Using Storage Virtualize collection to automate initial setup configuration
+ hosts: localhost
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+ - address: address
+ - city: city
+ - company_name: company_name
+ - contact_email: contact_email
+ - contact_name: contact_name
+ - country: country
+ - location: location
+ - primary_phonenumber: primary_phonenumber
+ - postal_code: postal_code
+ - province: province
+ - server_ip: x.x.x.x
+    - server_port: xxxx
+    - system_name: system_name
+ connection: local
+ tasks:
+ - name: Get auth token
+ register: results
+ ibm_svc_auth:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+    - name: 1. Initial setup config
+ ibm_svc_initial_setup:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ system_name: "{{ system_name }}"
+ dnsname:
+ - dnsserver01
+ dnsip:
+ - 'x.x.x.x'
+    - name: 2. Configure callhome with "email"
+ ibm_svc_manage_callhome:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: "enabled"
+ callhome_type: "email"
+ address: "{{ address}}"
+ city: "{{ city }}"
+ company_name: "{{ company_name }}"
+ contact_email: "{{ contact_email }}"
+ contact_name: "{{ contact_name }}"
+ country: "{{ country }}"
+ location: "{{ location }}"
+ phonenumber_primary: "{{ primary_phonenumber }}"
+ postalcode: "{{ postal_code }}"
+ province: "{{ province }}"
+ serverIP: "{{ server_ip }}"
+ serverPort: "{{ server_port }}"
+ inventory: "on"
+ invemailinterval: 1
+ enhancedcallhome: "on"
+ censorcallhome: "on"
+    - name: 3. Configure SRA
+ ibm_svc_manage_sra:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ state: enabled
+ name: SRA
+ sra_ip: y.y.y.y
+ sra_port: 22
+ support: remote
+    - name: 4. Complete initial setup
+ ibm_svc_complete_initial_setup:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml b/ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml
new file mode 100644
index 000000000..0ab44b4b5
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/map_volume_to_host.yml
@@ -0,0 +1,47 @@
+---
+- name: Testing the IBM Storage Virtualize collection ibm_svc_vol_map
+ hosts: localhost
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+ - domain: domain
+ - test_vdisk: vdisk_name
+ - pool: pool
+ - test_host: host_name
+ - fcwwpn1: fcwwpn
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Create volume
+ ibm_svc_manage_volume:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "{{test_vdisk}}"
+ state: present
+ pool: "{{pool}}"
+ size: "1024"
+ unit: "mb"
+ - name: Creating Host
+ ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "{{test_host}}"
+ state: present
+ fcwwpn: "{{ fcwwpn1 }}"
+ - name: map Host to Vdisk
+ ibm_svc_vol_map:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ volname: "{{test_vdisk}}"
+ host: "{{test_host}}"
+ state: present
+ scsi: 0
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt
new file mode 100644
index 000000000..319ecff2b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/Readme.txt
@@ -0,0 +1,28 @@
+Objective:
+This playbook creates an FC host, multiple volumes, and zones on a FlashSystem cluster, and maps all volumes to the host.
+
+Prerequisite:
+- IBM Storage Virtualize and Brocade Ansible collections must be installed
+- For more information on the Brocade switch Ansible collection, please refer to https://github.com/brocade/ansible/blob/master/README.rst
+
+These playbooks map multiple volumes of a cluster to an FC host
+- They use Storage Virtualize Ansible modules as well as Brocade Ansible modules to create zones
+
+There are a total of 2 files used for this use case (a sample run command follows the list)
+
+1. multiple_vol_creation_zone_map_vars
+   This file has all the variables required for the playbooks
+   - cluster_* : Parameters starting with cluster contain details of the cluster where the user wants to create volumes, hosts, etc.
+   - brocade_switch_* : Parameters starting with brocade_switch contain Brocade switch details
+   - application_host_*: Parameters starting with application_host contain details of the application host that performs the data reads/writes
+   - volume_details : Parameters starting with volume contain details of the volumes that will be mapped to the host
+   - portset_* : Parameters starting with portset contain portset details required for creating the FC host
+
+2. multiple_vol_creation_zone_map
+   - This playbook fetches the list of SCSI_HOST WWPNs associated with the given fcioportid from the specV cluster
+   - Creates a zone with the given name and adds the fetched specV ports and the given host WWPNs
+   - Creates multiple volumes based on the volume details provided
+   - Maps the volumes to the host to form multiple paths
+
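+To run the playbook (assuming the vars file sits alongside it, as it is loaded via vars_files):
+  ansible-playbook multiple_vol_creation_zone_map.yml
+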
+Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml
new file mode 100644
index 000000000..a30d9bf83
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map.yml
@@ -0,0 +1,203 @@
+- name: Using Storage Virtualize collection to create volumes, zones, and host mappings
+ hosts: localhost
+ vars_files:
+ - multiple_vol_creation_zone_map_vars
+ collections:
+ - ibm.storage_virtualize
+ - brocade.fos
+ vars:
+ brocade_credentials:
+ fos_ip_addr: "{{ brocade_switch_ip }}"
+ fos_user_name: "{{ brocade_switch_username }}"
+ fos_password: "{{ brocade_switch_password }}"
+ https: False
+ gather_facts: no
+ connection: local
+ tasks:
+
+ - name: Fetch authorization token for source
+ register: specv_token
+ ibm.storage_virtualize.ibm_svc_auth:
+ clustername: "{{ cluster_ip }}"
+ username: "{{ cluster_username }}"
+ password: "{{ cluster_password }}"
+
+ - name: Get details of the targetportfc
+ register: fcdetails
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ gather_subset: [targetportfc]
+ log_path: /tmp/fcdetails.debug
+
+ - name: get the WWPN list from lstargetportfc for given fc_port_id
+ set_fact:
+ specv_wwpn: "{{ specv_wwpn|default([]) + [item['WWPN']]}}"
+ when: (item.protocol == 'scsi' and item.host_io_permitted == 'yes' and item.fc_io_port_id in cluster_fcioportid)
+ loop: "{{ fcdetails.TargetPortFC }}"
+
+    - name: Convert specV WWPNs to the colon-separated format the switch expects
+ set_fact:
+ specv_wwpn_switch_format: "{{ specv_wwpn_switch_format|default([]) +[item|map('join')|join(':')] }}"
+ loop: "{{ (specv_wwpn)|map('batch', 2)|map('list')|list|lower }}"
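+      # Illustration: a WWPN like 500507680140A1B2 becomes 50:05:07:68:01:40:a1:b2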
+
+ - name: get all zoning information from switch
+ brocade.fos.brocade_facts:
+ credential: "{{brocade_credentials}}"
+ vfid: -1
+ gather_subset:
+ - brocade_zoning
+
+ - name: copy the active config in var active_switch_config
+ set_fact:
+ active_switch_config: "{{ ansible_facts.brocade_zoning['effective-configuration'].cfg_name }}"
+
+ - name: Create zones on Brocade switch
+ vars:
+ zone:
+ - name: "{{ application_host_zone_name }}"
+ members: "{{ application_host_wwpns + specv_wwpn_switch_format }}"
+ brocade.fos.brocade_zoning_zone:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ zones: "{{ zone }}"
+ members_add_only: True
+
+ - name: Add zone to active configuration
+ vars:
+ cfgs:
+ - name: "{{ active_switch_config }}"
+ members:
+ - "{{ application_host_zone_name }}"
+ brocade.fos.brocade_zoning_cfg:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ members_add_only: True
+ cfgs: "{{ cfgs }}"
+ active_cfg: "{{ active_switch_config }}"
+
+ - name: create host list for specv without colon format
+ set_fact:
+ application_host_wwpns_specvformat_list: "{{ application_host_wwpns_specvformat_list | default([]) + [(item | replace(':',''))|upper]}}"
+ loop: "{{application_host_wwpns }}"
+
+    - name: Join the host WWPN list into a colon-separated string for specV
+ set_fact:
+ application_host_wwpns_specvformat: "{{application_host_wwpns_specvformat |default('')+item +':'}}"
+ loop: "{{application_host_wwpns_specvformat_list| select() }}"
+
+ - set_fact:
+ application_host_wwpns_specvformat: "{{ application_host_wwpns_specvformat[:-1]}}"
+
+ - name: Creating Host on specv
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ host_name }}"
+ state: present
+ fcwwpn: "{{ application_host_wwpns_specvformat }}"
+
+    - name: Create an FC portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ portset_name }}"
+ porttype: fc
+ portset_type: host
+ state: present
+
+ - name: Add port ID to the portset
+ ibm.storage_virtualize.ibm_sv_manage_fcportsetmember:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ name: "{{ portset_name }}"
+ fcportid: "{{item}}"
+ state: present
+ loop: "{{ cluster_fcioportid }}"
+
+ - name: Create vdisk
+ register: results_cvdisk
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{cluster_ip}}"
+ token: "{{ specv_token.token }}"
+ domain:
+ state: present
+ name: "{{item.vol_name}}"
+ pool: "{{item.mdiskgrp}}"
+ size: "{{item.size}}"
+ unit: "{{item.unit}}"
+ loop: "{{ volume_details }}"
+
+ - name: map Host to Vdisk
+ ibm.storage_virtualize.ibm_svc_vol_map:
+ clustername: "{{cluster_ip}}"
+ token: "{{ specv_token.token }}"
+ domain:
+ state: present
+ volname: "{{item.vol_name}}"
+ host: "{{host_name}}"
+ loop: "{{ volume_details }}"
+
+ - name: Rescan the paths on the host and run multipath
+ shell: "ssh {{application_host_username}}@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan;sleep 40;"
+
+ - shell: "ssh {{application_host_username}}@{{application_host_ip}} multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - debug:
+ msg: "{{ multipath_var}}"
+
+    - name: Get details of the volumes
+ register: volinfo
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ cluster_ip }}"
+ token: "{{ specv_token.token }}"
+ gather_subset: [vol]
+ log_path: /tmp/volinfo.debug
+
+ - name: create volume list
+ set_fact:
+ vol_name_list: "{{ vol_name_list|default([])+ [item['vol_name']] }}"
+ loop: "{{ volume_details }}"
+
+ - debug:
+ msg: "{{ vol_name_list }}"
+
+    - name: Collect data for the created volumes
+ set_fact:
+ vol_list_full_data: "{{ vol_list_full_data|default([])+ [item] }}"
+ vol_name_uid: "{{ vol_name_uid|default([])+[[item['volume_name'],item['vdisk_UID']|lower]]}}"
+ when: (item.volume_name in vol_name_list )
+ loop: "{{ volinfo.Volume }}"
+
+ - debug:
+ msg: "{{ vol_name_uid }}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{dm_device| default([]) +[ [item.0] + [item.1] + [item.2]]}}"
+ when: (item.1 in item.2)
+ with_nested:
+ - "{{ vol_name_uid }}"
+ - "{{ multipath_var }}"
+
+    - name: Build the list of volumes already visible via multipath
+ set_fact:
+ vdisk_mapped_multipath: "{{vdisk_mapped_multipath| default([]) + [item[0]]}}"
+ loop: "{{ dm_device }}"
+
+ - debug:
+ msg: "{{ vdisk_mapped_multipath }}"
+
+    - name: Find volumes not visible via multipath
+ set_fact:
+ unmaped_vol_name_list: "{{ unmaped_vol_name_list|default([])+ [item] }}"
+ when: (item not in vdisk_mapped_multipath)
+ loop: "{{ vol_name_list }}"
+
+ - debug:
+ msg: "{{ unmaped_vol_name_list }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt
new file mode 100644
index 000000000..8a4fcdb18
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/multi_volume_create_host_mapping_zone_multipath/multiple_vol_creation_zone_map_vars.txt
@@ -0,0 +1,30 @@
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username
+application_host_password: password
+application_host_zone_name: test
+application_host_wwpns: ["10:00:00:90:fa:94:20:d0","10:00:00:90:fa:94:20:d2"]
+
+cluster_ip: x.x.x.x
+cluster_username: username1
+cluster_password: password1
+cluster_fcioportid: ['1']
+
+host_name: linux_ansible
+portset_name: portset_ansible
+portset_type: host
+port_type: fc
+brocade_switch_ip: z.z.z.z
+brocade_switch_username: username2
+brocade_switch_password: password2
+
+volume_details:
+ - vol_name: vdisk_3
+ mdiskgrp: "0"
+ size: "5"
+ unit: "gb"
+ - vol_name: vdisk_4
+ mdiskgrp: "0"
+ size: "5"
+ unit: "gb"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml b/ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml
new file mode 100644
index 000000000..4fe6cdc6c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/security_mgmt.yml
@@ -0,0 +1,27 @@
+- name: Using Storage Virtualize collection to change security settings
+ hosts: localhost
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Change max failed login limit
+ ibm.storage_virtualize.ibm_sv_manage_security:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "/tmp/playbook.debug"
+ maxfailedlogins: 5
+
+ - name: Change SSH protocol level
+ ibm.storage_virtualize.ibm_sv_manage_security:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "/tmp/playbook.debug"
+ sshprotocol: 2 \ No newline at end of file
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml
new file mode 100644
index 000000000..4fed509b0
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migrate.yml
@@ -0,0 +1,79 @@
+- name: Using Storage Virtualize collection to initiate migration
+ hosts: localhost
+ vars:
+ - auxcluster: x.x.x.x
+ - auxusername: auxusername
+ - auxpassword: auxpassword
+ - clustername: clustername
+ - username: username
+ - password: password
+ - cgname: Group_cg11
+ - remote_cluster: Cluster_x.x.x.x
+ - masterpool: site1pool1
+ - mastervol: master
+ - relname: scopy5
+ - auxvol: auxvol
+ - fcwwpn: fcwwpn
+ - size: 1
+ - unit: gb
+ - remote_pool: remote_pool
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for aux
+ register: auth
+ ibm_svc_auth:
+ clustername: "{{ auxcluster }}"
+ username: "{{auxusername}}"
+ password: "{{auxpassword}}"
+ - name: Fetch authorization token for master
+ register: results
+ ibm_svc_auth:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ - name: "create host"
+ ibm_svc_host:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: host_xyz
+ state: present
+ fcwwpn: "{{fcwwpn}}"
+ protocol: scsi
+    - name: "Create source volume source_vol_1"
+ ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ token: "{{results.token}}"
+ pool: "{{masterpool}}"
+ name: "source_vol_1"
+ size: "{{size}}"
+ unit: "{{ unit }}"
+ state: present
+ - name: Map Source volume to a host
+ ibm_svc_vol_map:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ volname: "source_vol_1"
+ host: "host_xyz"
+ state: present
+    - name: Initiate a volume migration with replicate_hosts as true when the target system has no hosts matching the source system
+ ibm_svc_manage_migration:
+ source_volume: "source_vol_1"
+ target_volume: "target_vol_1"
+ clustername: "{{ clustername }}"
+ remote_cluster: "{{ remote_cluster }}"
+ token: "{{ results.token }}"
+ state: initiate
+ replicate_hosts: true
+ remote_token: "{{ auth.token }}"
+ relationship_name: "mmapping_1"
+ remote_pool: "{{ remote_pool}}"
+    - name: Switch replication direction of a migration relationship when all hosts are mapped
+ ibm_svc_manage_migration:
+ relationship_name: "mmapping_1"
+ clustername: "{{ clustername}}"
+ token: "{{ results.token }}"
+ state: switch
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt
new file mode 100644
index 000000000..104909118
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/README.txt
@@ -0,0 +1,36 @@
+Objective:
+Migrate a volume from one FlashSystem to another FlashSystem in an application-transparent manner.
+
+Prerequisite:
+- IBM Storage Virtualize and Brocade Ansible collections must be installed
+- For more information on the Brocade switch Ansible collection, please refer to https://github.com/brocade/ansible/blob/master/README.rst
+
+These playbooks migrate a volume from a source cluster to the destination cluster.
+ - They use Storage Virtualize Ansible modules as well as Brocade Ansible modules to create zones.
+ - These playbooks are designed to migrate a volume mapped to the same Fibre Channel (FC) host from the source cluster to the destination cluster.
+
+There are a total of 3 files used for this use case (a sample run sequence follows the list).
+ 1. vol_migration_vars:
+    This file has all the variables required for the playbooks.
+    - src_cluster_* : Parameters starting with src_cluster contain details of the source cluster from which the user wants to migrate the volume
+    - dest_cluster* : Parameters starting with dest_cluster contain details of the destination cluster to which the volume will be migrated
+    - brocade_switch_* : Parameters starting with brocade_switch contain Brocade switch details
+    - application_host_*: Parameters starting with application_host contain details of the application host that performs the data reads/writes
+    - volume_details : The volume to be migrated, with its source and destination names and the host it is attached to
+ 2. initiate_migration:
+    - This playbook initiates the migration, creates an FC host with the same name as on the source cluster, and adds it to the default portset.
+    - Most importantly, it also starts the data copy from the source cluster to the destination cluster.
+    Note:
+    The user should not run the rescan_and_switch_paths playbook until the relationship is in the consistent_synchronized state.
+ 3. rescan_and_switch_paths:
+    - Execute this playbook once the relationship created by the above playbook is in the consistent_synchronized state.
+    - This playbook fetches the list of SCSI_HOST WWPNs associated with the given fcioportid from the specV destination cluster.
+    - Creates a zone with the given name and adds the fetched specV ports and the given host WWPNs.
+    - Maps the volume to the host and starts a SCSI rescan on the host.
+    - Switches the replication direction of the migration relationship once the host is mapped.
+    - Rescans the volume on the host again to get the updated path details.
+    - Deletes the source volume and the migration relationship that was created.
+    - Rescans the volume on the host again to get the reduced paths.
+
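+ To run the two playbooks in order (wait until the relationship reaches the consistent_synchronized state between the two runs):
+   ansible-playbook initiate_migration.yml
+   ansible-playbook rescan_and_switch_paths.yml
+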
+ Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml
new file mode 100644
index 000000000..541b509b2
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/initiate_migration.yml
@@ -0,0 +1,33 @@
+- name: Using Storage Virtualize collection to initiate migration
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars.txt
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+ - name: Initiate a volume migration with replicate_hosts as true
+ ibm_svc_manage_migration:
+ source_volume: "{{ src_vol_name }}"
+ target_volume: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ remote_cluster: "{{ dest_cluster_name }}"
+ token: "{{ src_token.token }}"
+ state: initiate
+ replicate_hosts: true
+ remote_token: "{{ dest_token.token }}"
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ remote_pool: "{{ dest_cluster_pool_name }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml
new file mode 100644
index 000000000..64c9f9d40
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/rescan_and_switch_paths.yml
@@ -0,0 +1,147 @@
+- name: Using Storage Virtualize collection to migrate given volume
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars.txt
+ collections:
+ - ibm.storage_virtualize
+    - brocade.fos
+ gather_facts: no
+ vars:
+ brocade_credentials:
+ fos_ip_addr: "{{ brocade_switch_ip }}"
+ fos_user_name: "{{ brocade_switch_username }}"
+ fos_password: "{{ brocade_switch_password }}"
+ https: False
+ dest_vol_name: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ dest_host_name: "{{ host_name }}"
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+  - name: Get details of the given volume
+ register: volinfo
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [vol]
+ objectname: "{{ dest_vol_name }}"
+ log_path: /tmp/volinfo.debug
+ - name: Get the volume UID data
+ set_fact:
+ vol_uid: "{{ volinfo.Volume[0]['vdisk_UID'] | lower }}"
+ when: volinfo.Volume[0] is defined
+  - name: Get details of the targetportfc
+ register: fcdetails
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [targetportfc]
+ log_path: /tmp/fcdetails.debug
+ - name: get the WWPN list from lstargetportfc for given fc_port_id
+ set_fact:
+ specv_wwpn: "{{ specv_wwpn|default([]) + [item['WWPN']]}}"
+ when: (item.protocol == 'scsi' and item.host_io_permitted == 'yes' and item.fc_io_port_id in dest_cluster_fcioportid)
+ loop: "{{ fcdetails.TargetPortFC }}"
+  - name: Convert SVC WWPNs to the colon-separated format the switch expects
+ set_fact:
+ specv_wwpn_switch_format: "{{ specv_wwpn_switch_format|default([]) +[item|map('join')|join(':')] }}"
+ loop: "{{ (specv_wwpn)|map('batch', 2)|map('list')|list|lower }}"
+ - name: get all zoning information from switch
+ brocade_facts:
+ credential: "{{brocade_credentials}}"
+ vfid: -1
+ gather_subset:
+ - brocade_zoning
+ - name: copy the active config in var active_switch_config
+ set_fact:
+ active_switch_config: "{{ ansible_facts.brocade_zoning['effective-configuration'].cfg_name }}"
+ - name: Create zones on Brocade switch
+ vars:
+ zone:
+ - name: "{{ application_host_zone_name }}"
+ members: "{{ application_host_wwpns + specv_wwpn_switch_format }}"
+ brocade.fos.brocade_zoning_zone:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ zones: "{{ zone }}"
+ members_add_only: True
+ - name: Add zone to active configuration
+ vars:
+ cfgs:
+ - name: "{{ active_switch_config }}"
+ members:
+ - "{{ application_host_zone_name }}"
+ brocade_zoning_cfg:
+ credential: "{{ brocade_credentials }}"
+ vfid: -1
+ members_add_only: True
+ cfgs: "{{ cfgs }}"
+ active_cfg: "{{ active_switch_config }}"
+ - name: map Vol to host
+ ibm_svc_vol_map:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ state: present
+ volname: "{{ dest_vol_name }}"
+ host: "{{ dest_host_name }}"
+ scsi: 0
+ - name: Rescan the paths on the host and run multipath
+ shell: "ssh root@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan;sleep 40;"
+ - shell: "ssh root@{{application_host_ip}} multipath -ll"
+ register: ps
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+ - name: Find Vol UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+ - name: Switch replication direction of a migration relationship when host is mapped
+ ibm_svc_manage_migration:
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ token: "{{ src_token.token }}"
+ state: switch
+ - name: Rescan the scsi bus devices on the host
+ ansible.builtin.shell: "ssh root@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan"
+ - shell: "ssh root@{{application_host_ip}} multipath -ll"
+ register: ps
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+ - name: Find Vol UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+ - name: Delete source volume and migration relationship
+ ibm_svc_manage_migration:
+ clustername: "{{ src_cluster_ip }}"
+ state: cleanup
+ source_volume: "{{ src_vol_name}}"
+ token: "{{ src_token.token }}"
+ log_path: /tmp/ansible.log
+ - shell: "ssh root@{{application_host_ip}} rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: "ssh root@{{application_host_ip}} multipath -ll"
+ register: ps
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+ - name: Find Vol UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+ - debug:
+ msg: "{{ dm_device }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt
new file mode 100644
index 000000000..1d9f399e8
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration/vol_migration_vars.txt
@@ -0,0 +1,28 @@
+src_cluster_name: Master
+src_cluster_ip: x.x.x.x
+src_cluster_username: username
+src_cluster_password: password
+
+dest_cluster_name: Aux_far
+dest_cluster_ip: y.y.y.y
+dest_cluster_username: username1
+dest_cluster_password: password1
+dest_cluster_pool_name: Pool0
+dest_cluster_fcioportid: ['1']
+
+brocade_switch_ip: z.z.z.z
+brocade_switch_username: username2
+brocade_switch_password: password2
+
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username4
+application_host_password: password4
+application_host_wwpns: ["10:00:00:90:fa:94:20:d0","10:00:00:90:fa:94:20:d2"]
+application_host_zone_name: test
+
+src_vol_name: vdisk_application1
+host_name: linux_host
+dest_vol_name: vdisk_application1
+rel_name: r1
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt
new file mode 100644
index 000000000..a69bd5c75
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/Readme.txt
@@ -0,0 +1,45 @@
+Objective:
+Migrate a volume from one FlashSystem to another FlashSystem in an application-transparent manner, with the target host attached via iSCSI.
+
+Prerequisite:
+- IBM Storage Virtualize Ansible collection must be installed
+
+These playbooks migrate a volume from a source cluster to the destination cluster.
+These playbooks are designed to migrate a volume mapped to a Fibre Channel (FC) or iSCSI host on the source cluster to an iSCSI host on the destination cluster.
+
+There are a total of 3 files used for this use case (a sample run sequence follows the list).
+ 1. vol_migration_vars:
+    This file has all the variables required for the playbooks
+    - src_cluster_* : Parameters starting with src_cluster contain details of the source cluster from which the user wants to migrate the volume
+    - dest_cluster* : Parameters starting with dest_cluster contain details of the destination cluster to which the volume will be migrated
+    - application_host_* : Parameters starting with application_host contain details of the application host that performs the data reads/writes
+    - application_iscsi_ip : Per-node details of the IPs to be configured, with the following fields
+      - node_name: Node name of cluster
+      - portset: portset name to be used
+      - ip_address: <ip address>
+      - subnet_prefix: <prefix>
+      - gateway: <gateway>
+      - port: <port_id>
+    - src_vol_name : Name of the volume on the source cluster that is to be migrated
+    - dest_vol_name : Name of the volume to be created on the destination cluster
+    - rel_name : Name of the relationship to be created between the source and destination clusters
+ 2. initiate_migration_for_given_volume:
+    - This playbook initiates the migration
+    - Most importantly, it also starts the data copy from the source cluster to the destination cluster
+    Note:
+    The user should not run the create_iscsi_host_map_vol_switch playbook until the relationship is in the consistent_synchronized state
+ 3. create_iscsi_host_map_vol_switch:
+    - Execute this playbook once the relationship created by the above playbook is in the consistent_synchronized state
+    - Creates an iSCSI host on the FlashSystem from the IQN defined in the application_host_iqn variable in the variable file
+    - Configures an IP on each node for iSCSI host connectivity
+    - Establishes iSCSI sessions from the host to the FlashSystem nodes
+    - Maps the volume to the host and starts a SCSI rescan on the host
+    - Switches the replication direction of the migration relationship once the host is mapped
+    - Rescans the volume on the host again to get the updated path details
+    - Deletes the source volume and the migration relationship that was created
+    - Rescans multipath again; the migrated volume should now have paths only from the destination cluster
+
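+ To run the two playbooks in order (wait until the relationship reaches the consistent_synchronized state between the two runs):
+   ansible-playbook initiate_migration_for_given_volume.yml
+   ansible-playbook create_iscsi_host_map_vol_switch.yml
+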
+ Authors: Ajinkya Nanavati (ananava1@in.ibm.com)
+ Mohit Chitlange (mochitla@in.ibm.com)
+ Devendra Mahajan (demahaj1@in.ibm.com)
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml
new file mode 100644
index 000000000..2862f73e1
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/create_iscsi_host_map_vol_switch.yml
@@ -0,0 +1,143 @@
+- name: Using Storage Virtualize collection to migrate given volume
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars
+ collections:
+ - ibm.storage_virtualize
+
+ gather_facts: no
+ vars:
+ dest_vol_name: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ dest_host_name: "{{ host_name }}"
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+
+    - name: Get details of the given volume
+ register: volinfo
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ gather_subset: [vol]
+ objectname: "{{ dest_vol_name }}"
+ log_path: /tmp/volinfo.debug
+
+ - name: Get the volume UID data
+ set_fact:
+ vol_uid: "{{ volinfo.Volume[0]['vdisk_UID'] | lower }}"
+ when: volinfo.Volume[0] is defined
+
+ - name: Creating Host on SVC
+ ibm_svc_host:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ name: "{{ dest_host_name }}"
+ state: present
+ iscsiname: "{{ application_host_iqn }}"
+
+ - name: map Vdisk to host
+ ibm_svc_vol_map:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ state: present
+ volname: "{{ dest_vol_name }}"
+ host: "{{ dest_host_name }}"
+ scsi: 0
+
+ - name: Create IP provisioning
+ ibm.storage_virtualize.ibm_svc_manage_ip:
+ clustername: "{{ dest_cluster_ip }}"
+ token: "{{ dest_token.token }}"
+ log_path: /tmp/playbook.debug
+ node: "{{ item.node_name }}"
+ port: "{{ item.port }}"
+ portset: "{{ item.portset }}"
+ ip_address: "{{ item.ip_address }}"
+ subnet_prefix: "{{ item.subnet_prefix }}"
+ gateway: "{{ item.gateway }}"
+ state: present
+ loop: "{{ application_iscsi_ip }}"
+
+ - name: Create iscsi session
+ shell: ssh {{ application_host_username }}@{{ application_host_ip }} "iscsiadm --mode discovery --type sendtargets --portal {{item.ip_address}} -l"
+ loop: "{{ application_iscsi_ip }}"
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - debug:
+ msg: "{{ multipath_var}}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device}}"
+
+ - name: Switch replication direction of a migration relationship
+ ibm_svc_manage_migration:
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ token: "{{ src_token.token }}"
+ state: switch
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device }}"
+
+ - name: Delete source volume and migration relationship
+ ibm_svc_manage_migration:
+ clustername: "{{ src_cluster_ip }}"
+ state: cleanup
+ source_volume: "{{ src_vol_name }}"
+ token: "{{ src_token.token }}"
+ log_path: /tmp/ansible.log
+
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "rescan-scsi-bus.sh -i --forcerescan; sleep 40;"
+ - shell: ssh {{ application_host_username }}@{{ application_host_ip }} "multipath -ll"
+ register: ps
+
+ - name: Separate facts
+ set_fact:
+ multipath_var: "{{ ps.stdout.split('mpath') }}"
+
+ - name: Find vdisk UID present in host with path
+ set_fact:
+ dm_device: "{{item}}"
+ loop: "{{ multipath_var }}"
+ when: vol_uid in item
+
+ - debug:
+ msg: "{{ dm_device}}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml
new file mode 100644
index 000000000..0df875c4e
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/initiate_migration_for_given_volume.yml
@@ -0,0 +1,33 @@
+- name: Using Storage Virtualize collection to initiate migration
+ hosts: localhost
+ vars_files:
+ - vol_migration_vars
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Fetch authorization token for source
+ register: src_token
+ ibm_svc_auth:
+ clustername: "{{ src_cluster_ip }}"
+ username: "{{ src_cluster_username }}"
+ password: "{{ src_cluster_password }}"
+ - name: Fetch authorization token for destination
+ register: dest_token
+ ibm_svc_auth:
+ clustername: "{{ dest_cluster_ip }}"
+ username: "{{ dest_cluster_username }}"
+ password: "{{ dest_cluster_password }}"
+ - name: Initiate a volume migration with replicate_hosts as false
+ ibm_svc_manage_migration:
+ source_volume: "{{ src_vol_name }}"
+ target_volume: "{{ dest_vol_name if dest_vol_name is defined else src_vol_name }}"
+ clustername: "{{ src_cluster_ip }}"
+ remote_cluster: "{{ dest_cluster_name }}"
+ token: "{{ src_token.token }}"
+ state: initiate
+ replicate_hosts: false
+ remote_token: "{{ dest_token.token }}"
+ relationship_name: "{{ rel_name if rel_name is defined else src_vol_name }}"
+ remote_pool: "{{ dest_cluster_pool_name }}"
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt
new file mode 100644
index 000000000..a7e18d7df
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volume_migration_on_svc_iscsi/vol_migration_vars.txt
@@ -0,0 +1,36 @@
+src_cluster_name: Master
+src_cluster_ip: x.x.x.x
+src_cluster_username: username
+src_cluster_password: password
+
+dest_cluster_name: Aux_far
+dest_cluster_ip: y.y.y.y
+dest_cluster_username: username1
+dest_cluster_password: password1
+dest_cluster_pool_name: mdiskgrp0
+
+application_host_details:
+application_host_name: linux_host
+application_host_ip: a.b.c.d
+application_host_username: username2
+application_host_password: password2
+application_host_iqn: "iqn.1994-05.com.redhat:5e54d1815f55"
+
+application_iscsi_ip:
+ - node_name: node1
+ portset: portset0
+ ip_address: 192.168.100.121
+ subnet_prefix: 24
+ gateway: 192.168.100.1
+ port: 6
+ - node_name: node2
+ portset: portset0
+ ip_address: 192.168.100.122
+ subnet_prefix: 24
+ gateway: 192.168.100.1
+ port: 6
+
+src_vol_name: vdisk_application1
+host_name: linux_host
+dest_vol_name: vdisk_application1
+rel_name: r1
diff --git a/ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml b/ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml
new file mode 100644
index 000000000..6bf9bc7f6
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/playbooks/volumegrp_create.yml
@@ -0,0 +1,29 @@
+- name: Using Storage Virtualize collection to create a volume group
+ hosts: localhost
+ vars:
+ - clustername: clustername
+ - username: username
+ - password: password
+ - domain: domain
+ collections:
+ - ibm.storage_virtualize
+ gather_facts: no
+ connection: local
+ tasks:
+ - name: Create a new volume group
+ ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: vg1
+ state: present
+ - name: Create volumegroup with existing snapshotpolicy
+ ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: vg2
+ state: present
+ snapshotpolicy: snapshotpolicy2
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_ssh.py b/ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_ssh.py
new file mode 100644
index 000000000..d42200733
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_ssh.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2024 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+# Sandip G. Rajbanshi <sandip.rajbanshi@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" Support class for IBM SVC generic ansible module """
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import inspect
+import uuid
+from ansible.module_utils.compat.paramiko import paramiko
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import get_logger
+
+COLLECTION_VERSION = "2.3.0"
+
+
+class IBMSVCssh(object):
+ """ Communicate with SVC through SSH
+        The module uses paramiko to connect to SVC
+ """
+
+ def __init__(self, module, clustername, username, password,
+ look_for_keys, key_filename, log_path):
+ """ Initialize module with what we need for initial connection
+ :param clustername: name of the SVC cluster
+ :type clustername: string
+ :param username: SVC username
+ :type username: string
+ :param password: Password for user
+ :type password: string
+ :param look_for_keys: whether to look for keys or not
+ :type look_for_keys: boolean
+ :param key_filename: SSH client private key file
+ :type key_filename: string
+ :param log_path: log file
+ :type log_path: string
+ """
+ self.module = module
+ self.clustername = clustername
+ self.username = username
+ self.password = password
+ self.look_for_keys = look_for_keys
+ self.key_filename = key_filename
+
+ self.is_client_connected = False
+
+ # logging setup
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ self.client_type = 'paramiko'
+ if paramiko is None:
+ self.module.fail_json(msg='paramiko is not installed')
+ self.client = paramiko.SSHClient()
+
+ # connect through SSH
+ self.is_client_connected = self._svc_connect()
+ if not self.is_client_connected:
+ self.module.fail_json(msg='Failed to connect')
+
+ def _svc_connect(self):
+ """
+ Initialize a SSH connection with properties
+ which were set up in constructor.
+ :return: True or False
+ """
+ self.client.load_system_host_keys()
+ self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ try:
+ self.client.connect(
+ hostname=self.clustername,
+ username=self.username,
+ password=self.password,
+ look_for_keys=self.look_for_keys,
+ key_filename=self.key_filename)
+ self.register_plugin()
+ return True
+ except paramiko.BadHostKeyException as e:
+ self.log("BadHostKeyException %s", e)
+ except paramiko.AuthenticationException as e:
+ self.log("AuthenticationException %s", e)
+ except paramiko.SSHException as e:
+ self.log("SSHException %s", e)
+ except Exception as e:
+ self.log("SSH connection failed %s", e)
+ return False
+
+ def is_connected(self):
+ return self.is_client_connected
+
+ def _svc_disconnect(self):
+ """
+ Disconnect from the SSH server.
+ """
+ try:
+ self.client.close()
+ self.is_client_connected = False
+ self.log("SSH disconnected")
+ return True
+ except Exception as e:
+ self.log("SSH Disconnection failed %s", e)
+ return False
+
+ def register_plugin(self):
+ try:
+ cmd = 'svctask registerplugin'
+ cmdopts = {}
+ name = "Ansible"
+ unique_key = self.username + "_" + str(uuid.getnode())
+ caller_class = inspect.stack()[2].frame.f_locals.get('self', None)
+ caller_class_name = caller_class.__class__.__name__
+ module_name = str(inspect.stack()[3].filename).rsplit('/', maxsplit=1)[-1]
+ metadata = module_name[:-3] + " module with class " + str(caller_class_name) + " has been executed by " + self.username
+
+ cmdopts['name'] = name
+ cmdopts['uniquekey'] = unique_key
+ cmdopts['version'] = COLLECTION_VERSION
+ cmdopts['metadata'] = metadata
+
+ for cmdoptions in cmdopts:
+ cmd = cmd + " -" + cmdoptions + " '" + cmdopts[cmdoptions] + "'"
+ self.client.exec_command(cmd)
+ return True
+        except Exception:
+            return False
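+
+# Illustrative usage sketch (not executed by the collection): a caller builds
+# the helper from an AnsibleModule instance and plain credentials, then reuses
+# the underlying paramiko client directly; all values below are placeholders.
+#
+#   ssh = IBMSVCssh(module, clustername='svc.example.com', username='superuser',
+#                   password='passw0rd', look_for_keys=False, key_filename=None,
+#                   log_path='/tmp/ansible_svc.log')
+#   if ssh.is_connected():
+#       stdin, stdout, stderr = ssh.client.exec_command('svcinfo lssystem')
+#       output = stdout.read().decode('utf-8')
+#   ssh._svc_disconnect()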
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_utils.py b/ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_utils.py
new file mode 100644
index 000000000..b732fadf6
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/module_utils/ibm_svc_utils.py
@@ -0,0 +1,360 @@
+# Copyright (C) 2024 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sandip G. Rajbanshi <sandip.rajbanshi@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" Support class for IBM SVC ansible modules """
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+import logging
+import uuid
+import inspect
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+COLLECTION_VERSION = "2.3.0"
+
+
+def svc_argument_spec():
+ """
+    Returns argument_spec of options common to ibm_svc_* modules
+
+ :returns: argument_spec
+ :rtype: dict
+ """
+ return dict(
+ clustername=dict(type='str', required=True),
+ domain=dict(type='str', default=None),
+ validate_certs=dict(type='bool', default=False),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ log_path=dict(type='str'),
+ token=dict(type='str', no_log=True)
+ )
+
+
+def svc_ssh_argument_spec():
+ """
+ Returns argument_spec of options common to ibm_svcinfo_command
+ and ibm_svctask_command modules
+
+ :returns: argument_spec
+ :rtype: dict
+ """
+ return dict(
+ clustername=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ log_path=dict(type='str')
+ )
+
+
+def strtobool(val):
+ '''
+    Converts a string representation of truth to a boolean value (1 or 0).
+
+    This function was available in the standard library under distutils.util
+    until Python 3.9, but distutils was deprecated in Python 3.10 and may be
+    removed from future Python releases, so the implementation is inlined here.
+ '''
+ if val in {'y', 'yes', 't', 'true', 'on', '1'}:
+ return 1
+ elif val in {'n', 'no', 'f', 'false', 'off', '0'}:
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
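+# For example, strtobool('yes') and strtobool('1') return 1, strtobool('off')
+# returns 0, and any other string (for example, 'maybe') raises ValueError.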
+
+
+def get_logger(module_name, log_file_name, log_level=logging.INFO):
+ FORMAT = '%(asctime)s.%(msecs)03d %(levelname)5s %(thread)d %(filename)s:%(funcName)s():%(lineno)s %(message)s'
+ DATEFORMAT = '%Y-%m-%dT%H:%M:%S'
+ log_path = 'IBMSV_ansible_collections.log'
+ if log_file_name:
+ log_path = log_file_name
+ logging.basicConfig(filename=log_path, format=FORMAT, datefmt=DATEFORMAT)
+ log = logging.getLogger(module_name)
+ log.setLevel(log_level)
+ return log
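+# For example, get_logger('IBMSVCvdisk', '/tmp/ansible_svc.log') returns a
+# logger that appends to the given file; when log_file_name is empty, records
+# go to IBMSV_ansible_collections.log in the current working directory.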
+
+
+class IBMSVCRestApi(object):
+ """ Communicate with SVC through RestApi
+ SVC commands usually have the format
+ $ command -opt1 value1 -opt2 value2 arg1 arg2 arg3
+ to use the RestApi we transform this into
+ https://host:7443/rest/command/arg1/arg2/arg3
+ data={'opt1':'value1', 'opt2':'value2'}
+ """
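+    # For example, svc_obj_info('lsvdisk', None, ['vol1']) issues a POST to
+    # https://host:7443/rest/lsvdisk/vol1 with no command options in the body.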
+
+ def __init__(self, module, clustername, domain, username, password,
+ validate_certs, log_path, token):
+ """ Initialize module with what we need for initial connection
+ :param clustername: name of the SVC cluster
+ :type clustername: string
+ :param domain: domain name to make a fully qualified host name
+ :type domain: string
+ :param username: SVC username
+ :type username: string
+ :param password: Password for user
+ :type password: string
+        :param validate_certs: whether to validate SSL certificates
+        :type validate_certs: bool
+ """
+ self.module = module
+ self.clustername = clustername
+ self.domain = domain
+ self.username = username
+ self.password = password
+ self.validate_certs = validate_certs
+ self.token = token
+
+ # logging setup
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Make sure we can connect through the RestApi
+ if self.token is None:
+ if not self.username or not self.password:
+ self.module.fail_json(msg="You must pass in either pre-acquired token"
+ " or username/password to generate new token")
+ self.token = self._svc_authorize()
+ else:
+ self.log("Token already passed: %s", self.token)
+
+ if not self.token:
+ self.module.exit_json(msg='Failed to obtain access token', unreachable=True)
+
+ @property
+ def port(self):
+ return getattr(self, '_port', None) or '7443'
+
+ @property
+ def protocol(self):
+ return getattr(self, '_protocol', None) or 'https'
+
+ @property
+ def resturl(self):
+ if self.domain:
+ hostname = '%s.%s' % (self.clustername, self.domain)
+ else:
+ hostname = self.clustername
+ return (getattr(self, '_resturl', None)
+ or "{protocol}://{host}:{port}/rest".format(
+ protocol=self.protocol, host=hostname, port=self.port))
+
+ @property
+ def token(self):
+ return getattr(self, '_token', None) or None
+
+ @token.setter
+ def token(self, value):
+ return setattr(self, '_token', value)
+
+ def _svc_rest(self, method, headers, cmd, cmdopts, cmdargs, timeout=10):
+ """ Run SVC command with token info added into header
+ :param method: http method, POST or GET
+ :type method: string
+ :param headers: http headers
+ :type headers: dict
+ :param cmd: svc command to run
+ :type cmd: string
+        :param cmdopts: svc command options, name parameter and value
+        :type cmdopts: dict
+        :param cmdargs: svc command arguments, non-named parameters
+        :type cmdargs: list
+        :param timeout: open_url argument to set timeout for http gateway
+        :type timeout: int
+        :return: dict of command results
+ :rtype: dict
+ """
+
+ # Catch any output or errors and pass back to the caller to deal with.
+ r = {
+ 'url': None,
+ 'code': None,
+ 'err': None,
+ 'out': None,
+ 'data': None
+ }
+
+ postfix = cmd
+ if cmdargs:
+ postfix = '/'.join([postfix] + [quote(str(a)) for a in cmdargs])
+ url = '/'.join([self.resturl] + [postfix])
+ r['url'] = url # Pass back in result for error handling
+ self.log("_svc_rest: url=%s", url)
+
+ payload = cmdopts if cmdopts else None
+ data = self.module.jsonify(payload).encode('utf8')
+ r['data'] = cmdopts # Original payload data has nicer formatting
+ self.log("_svc_rest: payload=%s", payload)
+
+ try:
+ o = open_url(url, method=method, headers=headers, timeout=timeout,
+ validate_certs=self.validate_certs, data=bytes(data))
+ except HTTPError as e:
+ self.log('_svc_rest: httperror %s', str(e))
+ r['code'] = e.getcode()
+ r['out'] = e.read()
+            r['err'] = "HTTPError %s" % str(e)
+ return r
+ except Exception as e:
+ self.log('_svc_rest: exception : %s', str(e))
+            r['err'] = "Exception %s" % str(e)
+ return r
+
+ try:
+ j = json.load(o)
+ except ValueError as e:
+ self.log("_svc_rest: value error pass: %s", str(e))
+ # pass, will mean both data and error are None.
+ return r
+
+ r['out'] = j
+ return r
+
+ def _svc_authorize(self):
+        """ Obtain a token if we are authorized to connect
+ :return: None or token string
+ """
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'X-Auth-Username': self.username,
+ 'X-Auth-Password': self.password
+ }
+
+ rest = self._svc_rest(method='POST', headers=headers, cmd='auth',
+ cmdopts=None, cmdargs=None)
+
+ rp_cmdopts = self.register_plugin_cmdopts()
+
+ if rest['err']:
+ return None
+
+ out = rest['out']
+ if out:
+ if 'token' in out:
+ try:
+ rp_headers = {
+ 'Content-Type': 'application/json',
+ 'X-Auth-Token': out['token']
+ }
+ self._svc_rest(method='POST', headers=rp_headers, cmd="registerplugin",
+ cmdopts=rp_cmdopts, cmdargs=None)
+                except Exception:
+                    pass
+ return out['token']
+
+ return None
+
+ def _svc_token_wrap(self, cmd, cmdopts, cmdargs, timeout=10):
+ """ Run SVC command with token info added into header
+ :param cmd: svc command to run
+ :type cmd: string
+        :param cmdopts: svc command options, name parameter and value
+        :type cmdopts: dict
+        :param cmdargs: svc command arguments, non-named parameters
+ :type cmdargs: list
+ :param timeout: open_url argument to set timeout for http gateway
+ :type timeout: int
+ :returns: command results
+ """
+
+ if self.token is None:
+ self.module.fail_json(msg="No authorize token")
+ # Abort
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'X-Auth-Token': self.token
+ }
+
+ return self._svc_rest(method='POST', headers=headers, cmd=cmd,
+ cmdopts=cmdopts, cmdargs=cmdargs, timeout=timeout)
+
+ def svc_run_command(self, cmd, cmdopts, cmdargs, timeout=10):
+ """ Generic execute a SVC command
+ :param cmd: svc command to run
+ :type cmd: string
+ :param cmdopts: svc command options, name parameter and value
+ :type cmdopts: dict
+ :param cmdargs: svc command arguments, non-named parameters
+ :type cmdargs: list
+ :param timeout: open_url argument to set timeout for http gateway
+ :type timeout: int
+ :returns: command output
+ """
+
+ rest = self._svc_token_wrap(cmd, cmdopts, cmdargs, timeout)
+ self.log("svc_run_command rest=%s", rest)
+
+ if rest['err']:
+ msg = rest
+ self.module.fail_json(msg=msg)
+ # Aborts
+
+ # Might be None
+ return rest['out']
+
+ def svc_obj_info(self, cmd, cmdopts, cmdargs, timeout=10):
+ """ Obtain information about an SVC object through the ls command
+ :param cmd: svc command to run
+ :type cmd: string
+ :param cmdopts: svc command options, name parameter and value
+ :type cmdopts: dict
+        :param cmdargs: svc command arguments, non-named parameters
+ :type cmdargs: list
+ :param timeout: open_url argument to set timeout for http gateway
+ :type timeout: int
+ :returns: command output
+ :rtype: dict
+ """
+
+ rest = self._svc_token_wrap(cmd, cmdopts, cmdargs, timeout)
+ self.log("svc_obj_info rest=%s", rest)
+
+ if rest['code']:
+ if rest['code'] == 500:
+ # Object did not exist, which is quite valid.
+ return None
+
+ # Fail for anything else
+ if rest['err']:
+ self.module.fail_json(msg=rest)
+ # Aborts
+
+ # Might be None
+ return rest['out']
+
+ def get_auth_token(self):
+        """ Obtain a new authentication token from the REST API
+ :returns: authentication token
+ """
+ # Make sure we can connect through the RestApi
+ self.token = self._svc_authorize()
+ self.log("_connect by using token")
+ if not self.token:
+ self.module.exit_json(msg='Failed to obtain access token', unreachable=True)
+
+ return self.token
+
+ def register_plugin_cmdopts(self):
+ cmdopts = {}
+ name = "Ansible"
+ unique_key = self.username + "_" + str(uuid.getnode())
+ caller_class = inspect.stack()[3].frame.f_locals.get('self', None)
+ caller_class_name = caller_class.__class__.__name__
+ module_name = str(inspect.stack()[3].filename).rsplit('/', maxsplit=1)[-1]
+ metadata = module_name[:-3] + " module with class " + str(caller_class_name) + " has been executed by " + self.username
+
+ cmdopts['name'] = name
+ cmdopts['uniquekey'] = unique_key
+ cmdopts['version'] = COLLECTION_VERSION
+ cmdopts['metadata'] = metadata
+ return cmdopts
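+
+# Illustrative usage sketch (not executed by the collection): modules construct
+# the REST client from their AnsibleModule parameters and then issue CLI-style
+# commands; all values below are placeholders.
+#
+#   restapi = IBMSVCRestApi(module=module, clustername='svc.example.com',
+#                           domain=None, username='superuser',
+#                           password='passw0rd', validate_certs=False,
+#                           log_path=None, token=None)
+#   vdisk = restapi.svc_obj_info('lsvdisk', None, ['vol1'])  # None if absent
+#   restapi.svc_run_command('chvdisk', {'name': 'vol1_new'}, ['vol1'])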
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/__init__.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/__init__.py
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_awss3_cloudaccount.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_awss3_cloudaccount.py
new file mode 100644
index 000000000..f1533c248
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_awss3_cloudaccount.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_awss3_cloudaccount
+short_description: This module configures and manages Amazon Simple Storage Service (Amazon S3) cloud account on IBM Storage Virtualize family systems
+version_added: '1.11.0'
+description:
+ - Ansible interface to manage mkcloudaccountawss3, chcloudaccountawss3, and rmcloudaccount commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates, updates (C(present)), or deletes (C(absent)) an Amazon S3 account.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of an Amazon S3 account.
+ type: str
+ required: true
+ old_name:
+ description:
+ - Specifies the old name of an Amazon S3 account.
+ - Valid when I(state=present), to rename the existing Amazon S3 account.
+ type: str
+ bucketprefix:
+ description:
+ - Specifies the prefix for the bucket object.
+ - Applies, when I(state=present), to create an Amazon S3 account.
+ type: str
+ accesskeyid:
+ description:
+ - Specifies the public part of the Amazon S3 access key credential
+        of the AWS user that the system uses to access the cloud storage.
+ type: str
+ secretaccesskey:
+ description:
+ - Specifies the secret access key of an Amazon S3 cloud account.
+ type: str
+ upbandwidthmbits:
+ description:
+ - Specifies the upload bandwidth limit in megabits per second (Mbps).
+ - The value must be a number 1-10240.
+ type: str
+ downbandwidthmbits:
+ description:
+ - Specifies the download bandwidth limit in megabits per second (Mbps).
+ - The value must be a number 1-10240.
+ type: str
+ region:
+ description:
+ - Specifies the AWS region to use to access the cloud account and store data.
+ type: str
+ encrypt:
+ description:
+ - Specifies whether to encrypt the data in the cloud account.
+ - By default, encryption is enabled if encryption is enabled on
+ the cluster unless I(encrypt=no) is specified.
+ - Valid when I(state=present) to create an Amazon S3 account.
+ type: str
+ choices: [ 'yes', 'no' ]
+ ignorefailures:
+ description:
+      - Specifies that the access key be changed regardless of whether the new access key works.
+ - Valid when I(state=present) to update an existing Amazon S3 account.
+ - Parameter is allowed only when I(accesskeyid) and I(secretaccesskey) are entered.
+ type: bool
+ mode:
+ description:
+ - Specifies the new or modified cloud account mode.
+ - Valid when I(state=present) to update an existing Amazon S3 account.
+ type: str
+ choices: [ import, normal ]
+ importsystem:
+ description:
+ - Specifies that the system's data be imported.
+ - Valid when I(state=present) to update an existing Amazon S3 account.
+ type: str
+ refresh:
+ description:
+ - Specifies a refresh of the system import candidates.
+ - If the account is in import mode, this parameter specifies a refresh of the data available for import.
+ type: bool
+ resetusagehistory:
+ description:
+ - Resets the usage history (to 0).
+ - Storage consumption that reflects the space that is consumed on the cloud account is cumulative,
+ which means that it remains in the current day row (the 0th row).
+ - Valid when I(state=present) to update an existing Amazon S3 account.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Configure Amazon S3 account
+ ibm.storage_virtualize.ibm_sv_manage_awss3_cloudaccount:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: awss3
+ bucketprefix: "{{bucketprefix}}"
+ accesskeyid: "{{accesskeyid}}"
+ secretaccesskey: "{{secretaccesskey}}"
+ state: present
+- name: Update Amazon S3 account configuration
+ ibm.storage_virtualize.ibm_sv_manage_awss3_cloudaccount:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: awss3
+ upbandwidthmbits: "{{upbandwidthmbits}}"
+ downbandwidthmbits: "{{downbandwidthmbits}}"
+ state: present
+- name: Update Amazon S3 account mode to import
+ ibm.storage_virtualize.ibm_sv_manage_awss3_cloudaccount:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: awss3
+ mode: import
+ importsystem: 123456789
+ state: present
+- name: Delete Amazon S3 account configuration
+ ibm.storage_virtualize.ibm_sv_manage_awss3_cloudaccount:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: awss3
+ state: absent
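+# A further example, using the old_name option documented above; names are
+# illustrative.
+- name: Rename an existing Amazon S3 account
+  ibm.storage_virtualize.ibm_sv_manage_awss3_cloudaccount:
+    clustername: "{{cluster}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    old_name: awss3
+    name: awss3_renamed
+    state: present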
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVAWSS3:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ old_name=dict(
+ type='str'
+ ),
+ bucketprefix=dict(
+ type='str',
+ ),
+ accesskeyid=dict(
+ type='str',
+ no_log=False
+ ),
+ secretaccesskey=dict(
+ type='str',
+ no_log=True
+ ),
+ upbandwidthmbits=dict(
+ type='str'
+ ),
+ downbandwidthmbits=dict(
+ type='str'
+ ),
+ region=dict(
+ type='str'
+ ),
+ encrypt=dict(
+ type='str',
+ choices=['yes', 'no']
+ ),
+ ignorefailures=dict(
+ type='bool'
+ ),
+ mode=dict(
+ type='str',
+ choices=['import', 'normal']
+ ),
+ importsystem=dict(
+ type='str'
+ ),
+ refresh=dict(
+ type='bool'
+ ),
+ resetusagehistory=dict(
+ type='bool'
+ ),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.state = self.module.params.get('state')
+ self.name = self.module.params.get('name')
+ self.old_name = self.module.params.get('old_name', '')
+ self.bucketprefix = self.module.params.get('bucketprefix', '')
+ self.accesskeyid = self.module.params.get('accesskeyid', '')
+ self.secretaccesskey = self.module.params.get('secretaccesskey')
+ self.upbandwidthmbits = self.module.params.get('upbandwidthmbits', '')
+ self.downbandwidthmbits = self.module.params.get('downbandwidthmbits', '')
+ self.region = self.module.params.get('region', '')
+ self.encrypt = self.module.params.get('encrypt')
+ # ignorefailures will be allowed only when access and secretkey are entered
+ self.ignorefailures = self.module.params.get('ignorefailures')
+ self.mode = self.module.params.get('mode')
+ self.importsystem = self.module.params.get('importsystem')
+ self.refresh = self.module.params.get('refresh')
+ self.resetusagehistory = self.module.params.get('resetusagehistory')
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+ self.aws_data = {}
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.state == 'present':
+ if self.accesskeyid:
+ if not self.secretaccesskey:
+ self.module.fail_json(msg='Parameters required together: accesskeyid, secretaccesskey')
+
+ elif self.state == 'absent':
+ invalids = ('bucketprefix', 'accesskeyid', 'secretaccesskey', 'upbandwidthmbits',
+ 'downbandwidthmbits', 'region', 'encrypt', 'ignorefailures', 'mode', 'importsystem',
+ 'refresh', 'resetusagehistory', 'old_name')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='state=absent but following parameters have been passed: {0}'.format(invalid_exists)
+ )
+
+ def create_validation(self):
+ if self.old_name:
+ self.rename_validation({})
+
+ required = ('bucketprefix', 'accesskeyid', 'secretaccesskey')
+ required_not_exists = ', '.join((var for var in required if not getattr(self, var)))
+
+ if required_not_exists:
+ self.module.fail_json(msg='Missing mandatory parameter: {0}'.format(required_not_exists))
+
+ invalids = ('ignorefailures', 'mode', 'importsystem',
+ 'refresh', 'resetusagehistory')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Following parameters not supported during creation: {0}'.format(invalid_exists)
+ )
+
+ def rename_validation(self, updates):
+ if self.old_name and self.name:
+
+ if self.name == self.old_name:
+ self.module.fail_json(msg='New name and old name should be different.')
+
+ new = self.is_aws_account_exists()
+ existing = self.is_aws_account_exists(name=self.old_name)
+
+ if existing:
+ if new:
+ self.module.fail_json(
+ msg='Cloud account ({0}) already exists for the given new name.'.format(self.name)
+ )
+ else:
+ updates['name'] = self.name
+ else:
+ if not new:
+ self.module.fail_json(
+                        msg='Cloud account ({0}) does not exist for the given old name.'.format(self.old_name)
+ )
+ else:
+ self.module.exit_json(
+ msg='Cloud account ({0}) already renamed. No modifications done.'.format(self.name)
+ )
+
+ def is_aws_account_exists(self, name=None):
+ result = {}
+ cmd = 'lscloudaccount'
+ name = name if name else self.name
+
+ data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[name])
+ if isinstance(data, list):
+ for d in data:
+ result.update(d)
+ else:
+ result = data
+
+ self.aws_data = result
+
+ return result
+
+ def aws_account_probe(self):
+ updates = {}
+ if self.encrypt and self.encrypt != self.aws_data.get('encrypt', ''):
+ self.module.fail_json(msg='Parameter not supported for update operation: encrypt')
+
+ if self.bucketprefix and self.bucketprefix != self.aws_data.get('awss3_bucket_prefix', ''):
+ self.module.fail_json(msg='Parameter not supported for update operation: bucketprefix')
+
+ if self.region and self.region != self.aws_data.get('awss3_region', ''):
+ self.module.fail_json(msg='Parameter not supported for update operation: region')
+
+ self.rename_validation(updates)
+
+ params = [
+ ('upbandwidthmbits', self.aws_data.get('up_bandwidth_mbits')),
+ ('downbandwidthmbits', self.aws_data.get('down_bandwidth_mbits')),
+ ('mode', self.aws_data.get('mode')),
+ ('importsystem', self.aws_data.get('import_system_name')),
+ ]
+
+ for k, v in params:
+ if getattr(self, k) and getattr(self, k) != v:
+ updates[k] = getattr(self, k)
+
+ if self.accesskeyid and self.aws_data.get('awss3_access_key_id') != self.accesskeyid:
+ updates['accesskeyid'] = self.accesskeyid
+ updates['secretaccesskey'] = self.secretaccesskey
+
+ # ignorefailures can be provided only when accesskeyid and secretaccesskey are given
+ if self.ignorefailures:
+ updates['ignorefailures'] = self.ignorefailures
+
+ if self.refresh and self.aws_data.get('refreshing') == 'no':
+ updates['refresh'] = self.refresh
+
+ # Can't validate the below parameters.
+ if self.resetusagehistory:
+ updates['resetusagehistory'] = self.resetusagehistory
+
+ return updates
+
+ def create_aws_account(self):
+ self.create_validation()
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkcloudaccountawss3'
+ cmdopts = {
+ 'name': self.name,
+ 'bucketprefix': self.bucketprefix,
+ 'accesskeyid': self.accesskeyid,
+ 'secretaccesskey': self.secretaccesskey
+ }
+
+ params = {'upbandwidthmbits', 'downbandwidthmbits', 'region', 'encrypt'}
+
+ cmdopts.update(
+ dict((key, getattr(self, key)) for key in params if getattr(self, key))
+ )
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None, timeout=20)
+ self.log('Cloud account (%s) created', self.name)
+ self.changed = True
+
+ def update_aws_account(self, updates):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ name = self.old_name if self.old_name else self.name
+ self.restapi.svc_run_command('chcloudaccountawss3', updates, cmdargs=[name], timeout=20)
+ self.changed = True
+
+ def delete_aws_account(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.restapi.svc_run_command('rmcloudaccount', cmdopts=None, cmdargs=[self.name], timeout=20)
+ self.changed = True
+
+ def apply(self):
+ if self.is_aws_account_exists(name=self.old_name):
+ if self.state == 'present':
+ modifications = self.aws_account_probe()
+ if modifications:
+ self.update_aws_account(modifications)
+ self.msg = 'AWS account ({0}) updated'.format(self.name)
+ else:
+ self.msg = 'AWS account ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_aws_account()
+ self.msg = 'AWS account ({0}) deleted.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'AWS account ({0}) does not exist'.format(self.name)
+ else:
+ self.create_aws_account()
+ self.msg = 'AWS account ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVAWSS3()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_cloud_backups.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_cloud_backups.py
new file mode 100644
index 000000000..4763bed08
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_cloud_backups.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_cloud_backups
+short_description: This module configures and manages cloud backups on IBM Storage Virtualize family systems
+version_added: '1.11.0'
+description:
+ - Ansible interface to manage backupvolume, backupvolumegroup, and rmvolumebackupgeneration commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or deletes (C(absent)) a cloud backup.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ volume_name:
+ description:
+ - Specifies the volume name for the volume being backed up.
+ - The parameters I(volume_name) and I(volumegroup_name) are mutually exclusive.
+ type: str
+ volumegroup_name:
+ description:
+ - Specifies the volumegroup name for the volume to back up.
+ - The parameters I(volume_name) and I(volumegroup_name) are mutually exclusive.
+ - Applies when I(state=present) to create cloud backups of all the volume group members.
+ - Cloud backup must be enabled on all the volume group members to execute this.
+ type: str
+ full:
+ description:
+ - Specifies that the snapshot generation for the volume should be a full snapshot.
+ - Applies when I(state=present).
+ type: bool
+ volume_UID:
+ description:
+ - Specifies the volume UID to delete a cloud backup of the volume.
+      - The value for a volume UID must be in the range 0-32.
+ - The parameters I(volume_UID) and I(volume_name) are mutually exclusive.
+ - Applies when I(state=absent) to delete cloud backups.
+ type: str
+ generation:
+ description:
+ - Specifies the snapshot generation ID that needs to be deleted for the volume.
+ - If the specified generation is for a snapshot operation that is in progress,
+ that snapshot operation is canceled.
+ - Applies when I(state=absent) to delete a generation of a volume backup.
+ - The parameters I(all) and I(generation) are mutually exclusive.
+ - Either I(generation) or I(all) is required to delete cloud backup.
+ type: int
+ all:
+ description:
+ - Specifies to delete all cloud backup generations.
+ - Applies when I(state=absent) to delete a backup.
+ - The parameters I(all) and I(generation) are mutually exclusive.
+ - Either I(generation) or I(all) is required to delete cloud backup.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create cloud backup of volume
+ ibm.storage_virtualize.ibm_sv_manage_cloud_backups:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ volume_name: vol1
+ full: true
+ state: present
+- name: Create cloud backup of volumegroup
+ ibm.storage_virtualize.ibm_sv_manage_cloud_backups:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ volumegroup_name: VG1
+ full: true
+ state: present
+- name: Delete cloud backup
+ ibm.storage_virtualize.ibm_sv_manage_cloud_backups:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ volume_UID: 6005076400B70038E00000000000001C
+ all: true
+ state: absent
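+# A further example, using the generation option documented above; values are
+# illustrative.
+- name: Delete a single cloud backup generation of a volume
+  ibm.storage_virtualize.ibm_sv_manage_cloud_backups:
+    clustername: "{{cluster}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    volume_name: vol1
+    generation: 1
+    state: absent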
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCloudBackup:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ required=True
+ ),
+ volume_name=dict(
+ type='str'
+ ),
+ volumegroup_name=dict(
+ type='str'
+ ),
+ generation=dict(
+ type='int',
+ ),
+ volume_UID=dict(
+ type='str',
+ ),
+ full=dict(
+ type='bool',
+ ),
+ all=dict(
+ type='bool'
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.volume_name = self.module.params.get('volume_name')
+ self.volumegroup_name = self.module.params.get('volumegroup_name')
+ self.full = self.module.params.get('full')
+
+ # Parameters for deletion
+ self.volume_UID = self.module.params.get('volume_UID', '')
+ self.generation = self.module.params.get('generation', '')
+ self.all = self.module.params.get('all')
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ if self.volume_UID:
+ self.module.fail_json(msg='Parameter not supported during creation: volume_UID')
+
+ if self.volume_name and self.volumegroup_name:
+ self.module.fail_json(msg='Mutually exclusive parameters: volume_name, volumegroup_name')
+
+ if not self.volumegroup_name and not self.volume_name:
+ self.module.fail_json(
+                    msg='One of these parameters is required to create a backup: volume_name, volumegroup_name')
+
+ invalids = ('generation', 'all')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Following parameters not supported during creation: {0}'.format(invalid_exists)
+ )
+ else:
+ if self.volume_name and self.volume_UID:
+ self.module.fail_json(msg='Mutually exclusive parameters: volume_name, volume_UID')
+
+ if not self.volume_name and not self.volume_UID:
+                self.module.fail_json(msg='One of these parameters is required to delete a backup: volume_name, volume_UID')
+
+ if self.generation and self.all:
+ self.module.fail_json(msg='Mutually exclusive parameters: generation, all')
+
+ if self.generation in {'', None} and self.all in {'', None}:
+                self.module.fail_json(msg='One of the following parameters is required: generation, all')
+
+ if self.volumegroup_name:
+ self.module.fail_json(msg='Parameter not supported during deletion: volumegroup_name')
+
+ if self.full not in {'', None}:
+ self.module.fail_json(msg='Parameter not supported during deletion: full')
+
+ def check_source(self):
+ result = {}
+ if self.volumegroup_name:
+ cmd = 'lsvolumegroup'
+ cmdargs = [self.volumegroup_name]
+ cmdopts = None
+ elif self.volume_name and self.state == 'present':
+ cmd = 'lsvdisk'
+ cmdargs = [self.volume_name]
+ cmdopts = None
+ else:
+ cmd = 'lsvolumebackupgeneration'
+ cmdargs = None
+ cmdopts = {}
+
+ if self.volume_UID:
+ self.var = self.volume_UID
+ cmdopts['uid'] = self.volume_UID
+ else:
+ self.var = self.volume_name
+ cmdopts['volume'] = self.volume_name
+
+ data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=cmdopts, cmdargs=cmdargs)
+ if isinstance(data, list):
+ for d in data:
+ result.update(d)
+ else:
+ result = data
+
+ if self.state == 'present':
+ return not result
+ else:
+ return result
+
+ def create_cloud_backup(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmdopts = {}
+ if self.volume_name:
+ cmd = 'backupvolume'
+ cmdargs = [self.volume_name]
+ self.msg = 'Cloud backup ({0}) created'.format(self.volume_name)
+ else:
+ cmd = 'backupvolumegroup'
+ cmdargs = [self.volumegroup_name]
+ self.msg = 'Cloud backup ({0}) created'.format(self.volumegroup_name)
+
+ if self.full:
+ cmdopts['full'] = True
+
+ response = self.restapi._svc_token_wrap(cmd, cmdopts, cmdargs=cmdargs)
+ self.log("create_cloud_backup response=%s", response)
+ self.changed = True
+
+ if response['out']:
+ if b'CMMVC9083E' in response['out']:
+ self.msg = 'CMMVC9083E: Volume is not ready to perform any operation right now.'
+ self.changed = False
+ elif b'CMMVC8753E' in response['out']:
+ self.msg = 'Backup already in progress.'
+ self.changed = False
+ else:
+ self.msg = response
+ self.module.fail_json(msg=self.msg)
+
+ self.log(self.msg)
+
+ def delete_cloud_backup(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmvolumebackupgeneration'
+ cmdopts = {}
+ if self.volume_name:
+ cmdopts['volume'] = self.volume_name
+ var = self.volume_name
+ self.msg = 'Cloud backup ({0}) deleted'.format(self.volume_name)
+ else:
+ cmdopts['uid'] = self.volume_UID
+ var = self.volume_UID
+ self.msg = 'Cloud backup ({0}) deleted'.format(self.volume_UID)
+
+ if self.generation:
+ cmdopts['generation'] = self.generation
+
+ if self.all not in {'', None}:
+ cmdopts['all'] = self.all
+
+ response = self.restapi._svc_token_wrap(cmd, cmdopts=cmdopts, cmdargs=None)
+ self.log('response=%s', response)
+ self.changed = True
+
+ if response['out']:
+ if b'CMMVC9104E' in response['out']:
+ self.changed = False
+ self.msg = 'CMMVC9104E: Volume ({0}) is not ready to perform any operation right now.'.format(var)
+ elif b'CMMVC9090E' in response['out']:
+ self.changed = False
+ self.msg = 'Cloud backup generation already deleted.'
+ else:
+ self.module.fail_json(msg=response)
+
+ self.log(self.msg)
+
+ def apply(self):
+ if self.check_source():
+ if self.state == 'present':
+                self.module.fail_json(msg='Volume or Volumegroup does not exist.')
+ else:
+ self.delete_cloud_backup()
+ else:
+ if self.state == 'absent':
+ self.msg = 'Backup ({0}) does not exist for the given name/UID.'.format(self.var)
+ self.log(self.msg)
+ else:
+ self.create_cloud_backup()
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+ self.log(self.msg)
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVCloudBackup()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fc_partnership.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fc_partnership.py
new file mode 100644
index 000000000..0de815b14
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fc_partnership.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_fc_partnership
+short_description: This module configures and manages Fibre Channel (FC) partnership on IBM Storage Virtualize family systems
+version_added: '1.12.0'
+description:
+ - Ansible interface to manage mkfcpartnership, chpartnership, and rmpartnership commands.
+options:
+ state:
+ description:
+      - Creates or updates (C(present)) or removes (C(absent)) an FC partnership.
+ choices: [ 'present', 'absent' ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ remote_clustername:
+ description:
+ - The hostname or management IP of the remote Storage Virtualize system.
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ remote_domain:
+ description:
+ - Domain for the remote Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(remote_clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ remote_username:
+ description:
+ - REST API username for the remote Storage Virtualize system.
+ - The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ remote_password:
+ description:
+ - REST API password for the remote Storage Virtualize system.
+ - The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ remote_token:
+ description:
+ - The authentication token to verify a user on the remote Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ remote_system:
+ description:
+ - Specifies the partner system ID or name.
+ type: str
+ linkbandwidthmbits:
+ description:
+ - Specifies the aggregate bandwidth of the remote copy link between two clustered systems (systems)
+ in megabits per second (Mbps). The value must be in the range of 1 - 100000.
+ - Valid when I(state=present).
+ type: str
+ backgroundcopyrate:
+ description:
+ - Specifies the maximum percentage of aggregate link bandwidth that can be used for background
+ copy operations. The value must be in the range of 0 - 100. The default value is 50.
+ - Valid when I(state=present).
+ type: str
+ pbrinuse:
+ description:
+ - Specifies whether policy-based replication will be used on the partnership.
+ - Valid when I(state=present) to update a partnership.
+ type: str
+ choices: [ 'yes', 'no' ]
+ start:
+ description:
+ - Specifies to start a partnership.
+ - Valid when I(state=present).
+ type: bool
+ stop:
+ description:
+ - Specifies to stop a partnership.
+ - Valid when I(state=present) to update a partnership.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification for the local Storage Virtualize system.
+ default: false
+ type: bool
+ remote_validate_certs:
+ description:
+ - Validates certification for the remote Storage Virtualize system.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create an FC partnership and start the partnership
+ ibm.storage_virtualize.ibm_sv_manage_fc_partnership:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ remote_clustername: "{{remote_clustername}}"
+ remote_username: "{{remote_username}}"
+ remote_password: "{{remote_password}}"
+ remote_system: "{{remote_system}}"
+ linkbandwidthmbits: 50
+ backgroundcopyrate: 50
+ start: True
+ state: present
+- name: Update an FC partnership and stop the partnership
+ ibm.storage_virtualize.ibm_sv_manage_fc_partnership:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ remote_clustername: "{{remote_clustername}}"
+ remote_username: "{{remote_username}}"
+ remote_password: "{{remote_password}}"
+ remote_system: "{{remote_system}}"
+ linkbandwidthmbits: 40
+ backgroundcopyrate: 20
+ stop: True
+ state: present
+- name: Delete the FC partnership
+ ibm.storage_virtualize.ibm_sv_manage_fc_partnership:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ remote_clustername: "{{remote_clustername}}"
+ remote_username: "{{remote_username}}"
+ remote_password: "{{remote_password}}"
+ remote_system: "{{remote_system}}"
+ state: absent
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVFCPartnership:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ remote_system=dict(type='str'),
+ linkbandwidthmbits=dict(type='str'),
+ backgroundcopyrate=dict(type='str'),
+ remote_clustername=dict(type='str'),
+ remote_domain=dict(type='str', default=None),
+ remote_username=dict(type='str'),
+ remote_password=dict(type='str', no_log=True),
+ remote_token=dict(type='str', no_log=True),
+ remote_validate_certs=dict(type='bool', default=False),
+ pbrinuse=dict(type='str', choices=['yes', 'no']),
+ start=dict(type='bool'),
+ stop=dict(type='bool')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required
+ self.state = self.module.params['state']
+ self.remote_system = self.module.params['remote_system']
+
+ # Optional
+ self.linkbandwidthmbits = self.module.params.get('linkbandwidthmbits', '')
+ self.backgroundcopyrate = self.module.params.get('backgroundcopyrate', '')
+ self.start = self.module.params.get('start', '')
+ self.stop = self.module.params.get('stop', '')
+ self.pbrinuse = self.module.params.get('pbrinuse', '')
+ self.remote_clustername = self.module.params.get('remote_clustername', '')
+ self.remote_username = self.module.params.get('remote_username', '')
+ self.remote_password = self.module.params.get('remote_password', '')
+ self.remote_domain = self.module.params.get('remote_domain', '')
+ self.remote_token = self.module.params.get('remote_token', '')
+ self.remote_validate_certs = self.module.params.get('remote_validate_certs', '')
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+ self.local_id = None
+ self.partnership_data = None
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ if self.remote_clustername:
+ self.remote_restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.remote_clustername,
+ domain=self.remote_domain,
+ username=self.remote_username,
+ password=self.remote_password,
+ validate_certs=self.remote_validate_certs,
+ log_path=self.log_path,
+ token=self.remote_token
+ )
+
+ def basic_checks(self):
+ if not self.remote_system:
+ self.module.fail_json(msg='Missing mandatory parameter: remote_system')
+
+ if self.state == 'present':
+ if self.start and self.stop:
+ self.module.fail_json(msg='Mutually exclusive parameters: start, stop')
+ else:
+ invalids = ('linkbandwidthmbits', 'backgroundcopyrate', 'start', 'stop', 'pbrinuse')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Following parameters not supported during deletion: {0}'.format(invalid_exists)
+ )
+
+ def create_validation(self, validate):
+ if validate:
+ if not self.remote_clustername:
+                self.module.fail_json(msg='Following parameter is mandatory during creation: remote_clustername')
+
+ if not self.linkbandwidthmbits:
+ self.module.fail_json(msg='Missing mandatory parameter: linkbandwidthmbits')
+
+ invalids = ('stop', 'pbrinuse')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Following parameters not supported during creation: {0}'.format(invalid_exists)
+ )
+
+ def is_partnership_exists(self, restapi, cluster):
+ result = {}
+ data = restapi.svc_obj_info(
+ cmd='lspartnership',
+ cmdopts=None,
+ cmdargs=[cluster]
+ )
+
+ if isinstance(data, list):
+ for d in data:
+ result.update(d)
+ else:
+ result = data
+
+ self.partnership_data = result
+
+ return result
+
+ def create_fc_partnership(self, restapi, cluster, validate):
+ self.create_validation(validate)
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkfcpartnership'
+ cmdopts = {
+ 'linkbandwidthmbits': self.linkbandwidthmbits
+ }
+
+ if self.backgroundcopyrate:
+ cmdopts['backgroundcopyrate'] = self.backgroundcopyrate
+
+ restapi.svc_run_command(cmd, cmdopts, cmdargs=[cluster])
+ self.log('FC partnership (%s) created', cluster)
+
+ if self.start:
+ restapi.svc_run_command('chpartnership', {'start': True}, [cluster])
+ self.log('FC partnership (%s) started', cluster)
+
+ self.changed = True
+
+ def probe_fc_partnership(self):
+ probe_data = {}
+ if self.linkbandwidthmbits and self.linkbandwidthmbits != self.partnership_data.get('link_bandwidth_mbits'):
+ probe_data['linkbandwidthmbits'] = self.linkbandwidthmbits
+
+ if self.backgroundcopyrate and self.backgroundcopyrate != self.partnership_data.get('background_copy_rate'):
+ probe_data['backgroundcopyrate'] = self.backgroundcopyrate
+
+ if self.pbrinuse and self.pbrinuse != self.partnership_data.get('pbr_in_use'):
+ probe_data['pbrinuse'] = self.pbrinuse
+
+ if self.start in {True, False}:
+ probe_data['start'] = self.start
+
+ if self.stop in {True, False}:
+ probe_data['stop'] = self.stop
+
+ return probe_data
+
+ def updated_fc_partnership(self, modification, restapi, cluster):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chpartnership'
+ if 'start' in modification:
+ modification.pop('start')
+ restapi.svc_run_command(cmd, {'start': True}, [cluster])
+ self.changed = True
+
+ if 'stop' in modification:
+ modification.pop('stop')
+ restapi.svc_run_command(cmd, {'stop': True}, [cluster])
+ self.changed = True
+
+ if modification:
+ restapi.svc_run_command(cmd, modification, [cluster])
+ self.changed = True
+
+ def delete_fc_partnership(self, restapi, cluster):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ restapi.svc_run_command('rmpartnership', None, [cluster])
+ self.changed = True
+
+ def apply(self):
+ subset = [(self.restapi, self.remote_system, True)]
+ if self.remote_clustername:
+ system_data = self.restapi.svc_obj_info('lssystem', None, None)
+ self.local_id = system_data['id']
+ subset.append((self.remote_restapi, self.local_id, False))
+
+ for restapi, cluster, validate in subset:
+ if self.is_partnership_exists(restapi, cluster):
+ if self.state == 'present':
+ modifications = self.probe_fc_partnership()
+ if modifications:
+ self.updated_fc_partnership(modifications, restapi, cluster)
+ self.msg += 'FC partnership ({0}) updated. '.format(cluster)
+ else:
+ self.msg += 'FC partnership ({0}) already exists. No modifications done. '.format(cluster)
+ else:
+ self.delete_fc_partnership(restapi, cluster)
+ self.msg += 'FC partnership ({0}) deleted. '.format(cluster)
+ else:
+ if self.state == 'absent':
+ self.msg += 'FC partnership ({0}) does not exist. No modifications done. '.format(cluster)
+ else:
+ self.create_fc_partnership(restapi, cluster, validate)
+ self.msg += 'FC partnership to the cluster({0}) created. '.format(cluster)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+ self.log(self.msg)
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVFCPartnership()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fcportsetmember.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fcportsetmember.py
new file mode 100644
index 000000000..2a02c04fd
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_fcportsetmember.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Sudheesh Reddy Satti<Sudheesh.Reddy.Satti@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_fcportsetmember
+short_description: This module manages addition or removal of ports to or from the Fibre Channel (FC) portsets on IBM Storage Virtualize family systems
+version_added: "1.12.0"
+description:
+  - Ansible interface to manage addfcportsetmember and rmfcportsetmember commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+        - Adds (C(present)) or removes (C(absent)) the FC port ID to or from the FC portset.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of the FC portset.
+ type: str
+ required: true
+ fcportid:
+ description:
+ - Specifies the Fibre Channel I/O port ID of the port.
+        - The value can be a decimal number from 1 to the maximum number of FC I/O ports.
+ type: str
+ required: true
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sudheesh S (@sudheesh-reddy)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Add port ID to the portset
+ ibm.storage_virtualize.ibm_sv_manage_fcportsetmember:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: portset1
+ fcportid: 3
+ state: present
+- name: Remove port ID from portset
+ ibm.storage_virtualize.ibm_sv_manage_fcportsetmember:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: portset1
+ fcportid: 3
+ state: absent
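+# A sketch of token-based authentication; 'results.token' is a placeholder
+# for a token registered earlier via the ibm_svc_auth module.
+- name: Remove port ID from portset using an auth token
+  ibm.storage_virtualize.ibm_sv_manage_fcportsetmember:
+    clustername: "{{cluster}}"
+    token: "{{results.token}}"
+    name: portset1
+    fcportid: 3
+    state: absent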
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVFCPortsetmember:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ name=dict(
+ type='str',
+ required=True,
+ ),
+ fcportid=dict(
+ type='str',
+ required=True,
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+ self.fcportid = self.module.params['fcportid']
+
+ self.basic_checks()
+
+        # Variable to cache data
+ self.fcportsetmember_details = None
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if not self.fcportid:
+            self.module.fail_json(msg='Missing mandatory parameter: fcportid')
+
+ def is_fcportsetmember_exists(self):
+ merged_result = {}
+ cmd = 'lsfcportsetmember'
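+        # The filter matches on both portset name and port ID (the colon
+        # separates the two conditions), so an entry comes back only when
+        # this exact port is already a member of this exact portset.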
+ cmdopts = {
+ "filtervalue": "portset_name={0}:fc_io_port_id={1}".format(self.name, self.fcportid)
+ }
+ data = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ self.fcportsetmember_details = merged_result
+
+ return merged_result
+
+ def add_fcportsetmember(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'addfcportsetmember'
+ cmdopts = {
+ 'portset': self.name,
+ 'fcioportid': self.fcportid
+ }
+
+ self.changed = True
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('FCPortsetmember (%s) mapping is created with fcportid (%s) successfully.', self.name, self.fcportid)
+
+ def remove_fcportsetmember(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmfcportsetmember'
+ cmdopts = {
+ 'portset': self.name,
+ 'fcioportid': self.fcportid
+ }
+
+ self.changed = True
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('FCPortsetmember (%s) mapping is removed from fcportid (%s) successfully.', self.name, self.fcportid)
+
+ def apply(self):
+
+ fcportsetmember_data = self.is_fcportsetmember_exists()
+
+ if fcportsetmember_data:
+ if self.state == 'present':
+                self.msg = 'FCPortsetmember ({0}) mapping with fcportid ({1}) already exists.'.format(self.name, self.fcportid)
+ else:
+ self.remove_fcportsetmember()
+ self.msg = 'FCPortsetmember ({0}) mapping is removed from fcportid ({1}) successfully.'.format(self.name, self.fcportid)
+ else:
+ if self.state == 'absent':
+ self.msg = 'FCPortsetmember ({0}) mapping does not exist with fcportid ({1}). No modifications done.'.format(self.name, self.fcportid)
+ else:
+ self.add_fcportsetmember()
+ self.msg = 'FCPortsetmember ({0}) mapping is created with fcportid ({1}) successfully.'.format(self.name, self.fcportid)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVFCPortsetmember()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ip_partnership.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ip_partnership.py
new file mode 100644
index 000000000..ecbe6bf3b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ip_partnership.py
@@ -0,0 +1,637 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_ip_partnership
+short_description: This module manages IP partnerships on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkippartnership', 'rmpartnership', and 'chpartnership' commands
+ on local and remote systems.
+version_added: "1.9.0"
+options:
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) an IP partnership.
+ choices: [ 'present', 'absent' ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ remote_clustername:
+ description:
+ - The hostname or management IP of the remote Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ remote_domain:
+ description:
+ - Domain for the remote Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(remote_clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ remote_username:
+ description:
+ - REST API username for the remote Storage Virtualize system.
+ - The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ remote_password:
+ description:
+ - REST API password for the remote Storage Virtualize system.
+ - The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ remote_token:
+ description:
+ - The authentication token to verify a user on the remote Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ remote_clusterip:
+ description:
+ - Specifies the partner system IP address, either IPv4 or IPv6.
+ - Required when I(state=present), to create an IP partnership.
+ type: str
+ remote_cluster_id:
+ description:
+ - Specifies the partnership ID of the partner system.
+ - Required when I(state=present), to modify an existing IP partnership.
+ - Required when I(state=absent), to remove an existing IP partnership.
+ type: str
+ type:
+ description:
+ - Specifies the Internet Protocol (IP) address format for the partnership.
+ - Valid when I(state=present).
+ choices: [ 'ipv4', 'ipv6' ]
+ type: str
+ compressed:
+ description:
+ - Specifies whether compression is enabled for this partnership.
+ - Valid when I(state=present).
+ choices: [ 'yes', 'no' ]
+ type: str
+ linkbandwidthmbits:
+ description:
+      - Specifies the aggregate bandwidth of the RC link between two clustered systems
+ in megabits per second (Mbps). This is a numeric value from 1 through 100000.
+ - Valid when I(state=present).
+ type: int
+ backgroundcopyrate:
+ description:
+ - Specifies the maximum percentage of aggregate link bandwidth that can be used for background
+ copy operations. This is a numeric value from 0 through 100. The default value is 50.
+ - Valid when I(state=present).
+ type: int
+ link1:
+ description:
+ - Specifies the portset name to be used for WAN link 1 of the Storage Virtualize system.
+ - Valid when I(state=present), to create an IP partnership.
+ type: str
+ remote_link1:
+ description:
+ - Specifies the portset name to be used for WAN link 1 of the remote Storage Virtualize system.
+ - Valid when I(state=present), to create an IP partnership.
+ type: str
+ link2:
+ description:
+ - Specifies the portset name to be used for WAN link 2 of the Storage Virtualize system.
+ - Valid when I(state=present), to create an IP partnership.
+ type: str
+ remote_link2:
+ description:
+ - Specifies the portset name to be used for WAN link 2 of the remote Storage Virtualize system.
+ - Valid when I(state=present), to create an IP partnership.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification for the local Storage Virtualize system.
+ default: false
+ type: bool
+ remote_validate_certs:
+ description:
+ - Validates certification for the remote Storage Virtualize system.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create an IP partnership
+ ibm.storage_virtualize.ibm_sv_manage_ip_partnership:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ remote_clustername: "{{ remote_clustername }}"
+ remote_domain: "{{ remote_domain }}"
+ remote_username: "{{ remote_username }}"
+ remote_password: "{{ remote_password }}"
+ log_path: "/tmp/debug.log"
+ remote_clusterip: "{{ partner_ip }}"
+ type: "ipv4"
+ linkbandwidthmbits: 100
+ backgroundcopyrate: 50
+    compressed: "yes"
+ link1: "{{ portsetname }}"
+ remote_link1: "{{ remote_portsetname}}"
+ state: "present"
+- name: Update an IP partnership
+ ibm.storage_virtualize.ibm_sv_manage_ip_partnership:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ remote_clustername: "{{ remote_clustername }}"
+ remote_domain: "{{ remote_domain }}"
+ remote_username: "{{ remote_username }}"
+ remote_password: "{{ remote_password }}"
+ log_path: "/tmp/debug.log"
+ remote_cluster_id: "{{ cluster_id }}"
+ linkbandwidthmbits: 110
+ backgroundcopyrate: 60
+    compressed: "no"
+ state: "present"
+- name: Remove an IP partnership
+ ibm.storage_virtualize.ibm_sv_manage_ip_partnership:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ remote_clustername: "{{ remote_clustername }}"
+ remote_username: "{{ remote_username }}"
+ remote_password: "{{ remote_password }}"
+ log_path: "/tmp/debug.log"
+ remote_cluster_id: "{{ cluster_id }}"
+ state: "absent"
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCIPPartnership(object):
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ type=dict(type='str', required=False, choices=['ipv4', 'ipv6']),
+ remote_clusterip=dict(type='str', required=False),
+ remote_cluster_id=dict(type='str', required=False),
+ compressed=dict(type='str', required=False, choices=['yes', 'no']),
+ linkbandwidthmbits=dict(type='int', required=False),
+ backgroundcopyrate=dict(type='int', required=False),
+ link1=dict(type='str', required=False),
+ link2=dict(type='str', required=False),
+ remote_clustername=dict(type='str', required=True),
+ remote_domain=dict(type='str', default=None),
+ remote_username=dict(type='str'),
+ remote_password=dict(type='str', no_log=True),
+ remote_token=dict(type='str', no_log=True),
+ remote_validate_certs=dict(type='bool', default=False),
+ remote_link1=dict(type='str', required=False),
+ remote_link2=dict(type='str', required=False)
+ )
+ )
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+ # Required
+ self.state = self.module.params['state']
+ self.remote_clustername = self.module.params['remote_clustername']
+ # Optional
+ self.remote_username = self.module.params.get('remote_username', '')
+ self.remote_password = self.module.params.get('remote_password', '')
+ self.remote_clusterip = self.module.params.get('remote_clusterip', '')
+ self.remote_cluster_id = self.module.params.get('remote_cluster_id', '')
+ self.type = self.module.params.get('type', '')
+ self.compressed = self.module.params.get('compressed', '')
+ self.linkbandwidthmbits = self.module.params.get('linkbandwidthmbits', '')
+ self.backgroundcopyrate = self.module.params.get('backgroundcopyrate', '')
+ self.link1 = self.module.params.get('link1', '')
+ self.link2 = self.module.params.get('link2', '')
+ self.remote_domain = self.module.params.get('remote_domain', '')
+ self.remote_token = self.module.params.get('remote_token', '')
+ self.remote_validate_certs = self.module.params.get('remote_validate_certs', '')
+ self.remote_link1 = self.module.params.get('remote_link1', '')
+ self.remote_link2 = self.module.params.get('remote_link2', '')
+ # Internal variable
+ self.changed = False
+ # creating an instance of IBMSVCRestApi for local system
+ self.restapi_local = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+ # creating an instance of IBMSVCRestApi for remote system
+ self.restapi_remote = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['remote_clustername'],
+ domain=self.module.params['remote_domain'],
+ username=self.module.params['remote_username'],
+ password=self.module.params['remote_password'],
+ validate_certs=self.module.params['remote_validate_certs'],
+ log_path=log_path,
+ token=self.module.params['remote_token']
+ )
+
+ # perform some basic checks
+ def basic_checks(self):
+ # Handling for mandatory parameter 'state'
+ if not self.state:
+ self.module.fail_json(msg="Missing mandatory parameter: state")
+
+ # Parameter validation for creating IP partnership
+ def create_parameter_validation(self):
+ if self.state == 'present':
+ if not self.remote_clusterip:
+ self.module.fail_json(msg="Missing required parameter during creation: remote_clusterip")
+ if not (self.link1 or self.link2):
+ self.module.fail_json(msg="At least one is required during creation: link1 or link2")
+ if not (self.remote_link1 or self.remote_link2):
+ self.module.fail_json(msg="At least one is required during creation: remote_link1 or remote_link2")
+
+ # Parameter validation for deleting IP partnership
+ def delete_parameter_validation(self):
+ if self.state == 'absent':
+ if not self.remote_cluster_id:
+ self.module.fail_json(msg="Missing required parameter during deletion: remote_cluster_id")
+ unsupported = []
+ check_list = {
+ 'remote_clusterip': self.remote_clusterip,
+ 'type': self.type,
+ 'linkbandwidthmbits': self.linkbandwidthmbits,
+ 'backgroundcopyrate': self.backgroundcopyrate,
+ 'compressed': self.compressed,
+ 'link1': self.link1,
+ 'link2': self.link2,
+ 'remote_link1': self.remote_link1,
+ 'remote_link2': self.remote_link2
+ }
+ self.log('%s', check_list)
+ for key, value in check_list.items():
+ if value:
+ unsupported.append(key)
+ if unsupported:
+ self.module.fail_json(msg="Unsupported parameter during deletion: {0}".format(unsupported))
+
+ # Parameter validation for updating IP partnership
+ def update_parameter_validation(self):
+ if self.state == 'present' and not self.remote_cluster_id:
+ self.module.fail_json(msg="Missing required parameter during updation: remote_cluster_id")
+
+ # fetch system IP address
+ def get_ip(self, rest_obj):
+ system_data = rest_obj.svc_obj_info('lssystem', {}, None)
+ if system_data and 'console_IP' in system_data and ':' in system_data['console_IP']:
+ return system_data['console_IP'].split(':')[0]
+ else:
+ self.module.fail_json(msg="Failed to fetch the IP address of local system")
+
+ # get all partnership
+ def get_all_partnership(self, rest_obj):
+ return rest_obj.svc_obj_info(cmd='lspartnership', cmdopts=None, cmdargs=[])
+
+ # filter partnership data
+ def filter_partnership(self, data, ip):
+ return list(
+ filter(
+ lambda item: item['cluster_ip'] == ip, data
+ )
+ )
+
+ # get local partnership
+ def get_local_partnership(self, data):
+ return list(
+ filter(
+ lambda item: item['location'] == 'local', data
+ )
+ )
+
+ # get all the attributes of a partnership
+ def get_partnership_detail(self, rest_obj, id):
+ return rest_obj.svc_obj_info(cmd='lspartnership', cmdopts=None, cmdargs=[id])
+
+ # fetch partnership data
+ def gather_all_validation_data(self, rest_local, rest_remote):
+ local_data = {}
+ remote_data = {}
+ local_ip = self.get_ip(rest_local)
+ local_id = None
+ # while updating and removing existing partnership
+ if self.remote_cluster_id:
+ local_data = self.get_partnership_detail(rest_local, self.remote_cluster_id)
+ all_local_partnership = self.get_all_partnership(rest_local)
+ if all_local_partnership:
+ local_partnership_data = self.get_local_partnership(all_local_partnership)
+ if local_partnership_data:
+ local_id = local_partnership_data[0]['id']
+ remote_data = self.get_partnership_detail(rest_remote, local_id)
+ # while creating partnership
+ else:
+ all_local_partnership = self.get_all_partnership(rest_local)
+ if all_local_partnership:
+ if self.remote_clusterip:
+ local_filter = self.filter_partnership(
+ all_local_partnership,
+ self.remote_clusterip
+ )
+ if local_filter:
+ local_data = self.get_partnership_detail(rest_local, local_filter[0]['id'])
+
+ all_remote_partnership = self.get_all_partnership(rest_remote)
+ if all_remote_partnership:
+ remote_filter = self.filter_partnership(
+ all_remote_partnership,
+ local_ip
+ )
+ if remote_filter:
+ remote_data = self.get_partnership_detail(rest_remote, remote_filter[0]['id'])
+ return local_ip, local_id, local_data, remote_data
+
+ # create a new IP partnership
+ def create_partnership(self, location, cluster_ip):
+ # when executed with check mode
+ if self.module.check_mode:
+ self.changed = True
+ return
+ rest_api = None
+ cmd = 'mkippartnership'
+ cmd_opts = {
+ 'clusterip': cluster_ip
+ }
+ if self.type:
+ cmd_opts['type'] = self.type
+ if self.compressed:
+ cmd_opts['compressed'] = self.compressed
+ if self.linkbandwidthmbits:
+ cmd_opts['linkbandwidthmbits'] = self.linkbandwidthmbits
+ if self.backgroundcopyrate:
+ cmd_opts['backgroundcopyrate'] = self.backgroundcopyrate
+ if location == 'local':
+ rest_api = self.restapi_local
+ if self.link1:
+ cmd_opts['link1'] = self.link1
+ if self.link2:
+ cmd_opts['link2'] = self.link2
+ if location == 'remote':
+ rest_api = self.restapi_remote
+ if self.remote_link1:
+ cmd_opts['link1'] = self.remote_link1
+ if self.remote_link2:
+ cmd_opts['link2'] = self.remote_link2
+ result = rest_api.svc_run_command(cmd, cmd_opts, cmdargs=None)
+ self.log("Create result '%s'.", result)
+ if result == '':
+ self.changed = True
+ self.log("Created IP partnership for %s system.", location)
+ else:
+ self.module.fail_json(msg="Failed to create IP partnership for cluster ip {0}".format(cluster_ip))
+
+ # delete an existing partnership
+ def remove_partnership(self, location, id):
+ # when executed with check mode
+ if self.module.check_mode:
+ self.changed = True
+ return
+ rest_api = None
+ cmd = 'rmpartnership'
+ if location == 'local':
+ rest_api = self.restapi_local
+ if location == 'remote':
+ rest_api = self.restapi_remote
+ rest_api.svc_run_command(cmd, {}, [id])
+ self.log('Deleted partnership with name %s.', id)
+ self.changed = True
+
+ # probe a partnership
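+    # Only 'compressed', 'linkbandwidthmbits', 'backgroundcopyrate' and the
+    # partner cluster IP can be changed in place; 'type' and the link portsets
+    # cannot, so a mismatch on those fails the module instead of updating.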
+ def probe_partnership(self, local_data, remote_data):
+ modify_local, modify_remote = {}, {}
+ # unsupported parameters while updating
+ unsupported = []
+ if self.link1:
+ if local_data and local_data['link1'] != self.link1:
+ unsupported.append('link1')
+ if self.link2:
+ if local_data and local_data['link2'] != self.link2:
+ unsupported.append('link2')
+ if self.remote_link1:
+ if remote_data and remote_data['link1'] != self.remote_link1:
+ unsupported.append('remote_link1')
+ if self.remote_link2:
+ if remote_data and remote_data['link2'] != self.remote_link2:
+ unsupported.append('remote_link2')
+ if self.type:
+ if (local_data and local_data['type'] != self.type) or (remote_data and remote_data['type'] != self.type):
+ unsupported.append('type')
+ if unsupported:
+ self.module.fail_json(msg="parameters {0} cannot be updated".format(unsupported))
+ # supported parameters while updating
+ if self.compressed:
+ if local_data and local_data['compressed'] != self.compressed:
+ modify_local['compressed'] = self.compressed
+ if remote_data and remote_data['compressed'] != self.compressed:
+ modify_remote['compressed'] = self.compressed
+ if self.linkbandwidthmbits:
+ if local_data and int(local_data['link_bandwidth_mbits']) != self.linkbandwidthmbits:
+ modify_local['linkbandwidthmbits'] = self.linkbandwidthmbits
+ if remote_data and int(remote_data['link_bandwidth_mbits']) != self.linkbandwidthmbits:
+ modify_remote['linkbandwidthmbits'] = self.linkbandwidthmbits
+ if self.backgroundcopyrate:
+ if local_data and int(local_data['background_copy_rate']) != self.backgroundcopyrate:
+ modify_local['backgroundcopyrate'] = self.backgroundcopyrate
+ if remote_data and int(remote_data['background_copy_rate']) != self.backgroundcopyrate:
+ modify_remote['backgroundcopyrate'] = self.backgroundcopyrate
+ if self.remote_clusterip:
+ if local_data and self.remote_clusterip != local_data['cluster_ip']:
+ modify_local['clusterip'] = self.remote_clusterip
+ return modify_local, modify_remote
+
+ # start a partnership
+ def start_partnership(self, rest_object, id):
+ cmd = 'chpartnership'
+ cmd_opts = {
+ 'start': True
+ }
+ cmd_args = [id]
+ rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
+ self.log('Started the partnership %s.', id)
+
+ # stop a partnership
+ def stop_partnership(self, rest_object, id):
+ cmd = 'chpartnership'
+ cmd_opts = {
+ 'stop': True
+ }
+ cmd_args = [id]
+ rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
+ self.log('Stopped partnership %s.', id)
+
+ # update a partnership
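+    # 'clusterip' and 'compressed' changes are applied inside a stop/start
+    # cycle of the partnership, while bandwidth and copy-rate changes are
+    # applied directly to the live partnership.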
+ def update_partnership(self, location, id, modify_data):
+ # when executed with check mode
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = 'chpartnership'
+ cmd_args = [id]
+ rest_object = None
+ if location == 'local':
+ rest_object = self.restapi_local
+ if location == 'remote':
+ rest_object = self.restapi_remote
+ if 'compressed' in modify_data or 'clusterip' in modify_data:
+ cmd_opts = {}
+ if 'compressed' in modify_data:
+ cmd_opts['compressed'] = modify_data['compressed']
+ if 'clusterip' in modify_data and location == 'local':
+ cmd_opts['clusterip'] = modify_data['clusterip']
+ if cmd_opts:
+ # stop the partnership
+ self.stop_partnership(rest_object, id)
+ # perform update operation
+ rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
+ # start the partnership
+ self.start_partnership(rest_object, id)
+ self.changed = True
+ if 'linkbandwidthmbits' in modify_data or 'backgroundcopyrate' in modify_data:
+ cmd_opts = {}
+ if 'linkbandwidthmbits' in modify_data:
+ cmd_opts['linkbandwidthmbits'] = modify_data['linkbandwidthmbits']
+ if 'backgroundcopyrate' in modify_data:
+ cmd_opts['backgroundcopyrate'] = modify_data['backgroundcopyrate']
+ if cmd_opts:
+ # perform the update operation
+ rest_object.svc_run_command(cmd, cmd_opts, cmd_args)
+ self.changed = True
+
+ def apply(self):
+ msg = ''
+ self.basic_checks()
+ local_ip, local_id, local_data, remote_data = self.gather_all_validation_data(self.restapi_local, self.restapi_remote)
+ if self.state == 'present':
+ if local_data and remote_data:
+ modify_local, modify_remote = self.probe_partnership(local_data, remote_data)
+ if modify_local or modify_remote:
+ self.update_parameter_validation()
+ if modify_local:
+ self.update_partnership('local', self.remote_cluster_id, modify_local)
+ msg += 'IP partnership updated on local system.'
+ else:
+ msg += 'IP partnership already exists on local system.'
+ if modify_remote:
+ self.update_partnership('remote', local_id, modify_remote)
+ msg += ' IP partnership updated on remote system.'
+ else:
+ msg += ' IP partnership already exists on remote system.'
+ else:
+                msg += 'IP partnership already exists on both local and remote systems.'
+ elif local_data and not remote_data:
+ response = self.probe_partnership(local_data, remote_data)
+ modify_local = response[0]
+ self.create_parameter_validation()
+ self.create_partnership('remote', local_ip)
+ msg += 'IP partnership created on remote system.'
+ if modify_local:
+ self.update_parameter_validation()
+ self.update_partnership('local', self.remote_cluster_id, modify_local)
+                msg += ' IP partnership updated on local system.'
+            else:
+                msg += ' IP partnership already exists on local system.'
+ elif not local_data and remote_data:
+ response = self.probe_partnership(local_data, remote_data)
+ modify_remote = response[1]
+ self.create_parameter_validation()
+ self.create_partnership('local', self.remote_clusterip)
+            msg += 'IP partnership created on local system.'
+            if modify_remote:
+                self.update_partnership('remote', local_id, modify_remote)
+                msg += ' IP partnership updated on remote system.'
+            else:
+                msg += ' IP partnership already exists on remote system.'
+ elif not local_data and not remote_data:
+ self.create_parameter_validation()
+ self.create_partnership('local', self.remote_clusterip)
+ self.create_partnership('remote', local_ip)
+ msg = 'IP partnership created on both local and remote system.'
+ elif self.state == 'absent':
+            # parameter validation while removing partnership
+ self.delete_parameter_validation()
+ # removal of partnership on both local and remote system
+ if local_data and remote_data:
+ self.remove_partnership('local', self.remote_cluster_id)
+ self.remove_partnership('remote', local_id)
+                msg += 'IP partnership deleted from both local and remote systems.'
+            elif local_data and not remote_data:
+                self.remove_partnership('local', self.remote_cluster_id)
+                msg += 'IP partnership deleted from local system.'
+                msg += ' IP partnership does not exist on remote system.'
+            elif not local_data and remote_data:
+                self.remove_partnership('remote', local_id)
+                msg += 'IP partnership deleted from remote system.'
+                msg += ' IP partnership does not exist on local system.'
+            elif not local_data and not remote_data:
+                msg += 'IP partnership does not exist on either the local or remote system. No modifications done.'
+
+ if self.module.check_mode:
+ msg = 'Skipping changes due to check mode.'
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCIPPartnership()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_provisioning_policy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_provisioning_policy.py
new file mode 100644
index 000000000..c3e7c8692
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_provisioning_policy.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_provisioning_policy
+short_description: This module configures and manages provisioning policies on IBM Storage Virtualize family systems
+version_added: '1.10.0'
+description:
+ - Ansible interface to manage mkprovisioningpolicy, chprovisioningpolicy, and rmprovisioningpolicy commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates, updates (C(present)), or deletes (C(absent)) a provisioning policy.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of the provisioning policy.
+ - Specifies the new name during rename.
+ type: str
+ required: true
+ capacitysaving:
+ description:
+ - Specifies the policy capacity savings.
+ - Applies, when I(state=present), to create a provisioning policy.
+ choices: [ drivebased, thin, compressed ]
+ type: str
+ deduplicated:
+ description:
+ - Specifies when volumes should be deduplicated.
+ - Applicable when I(capacitysaving=thin) or I(capacitysaving=compressed).
+ default: false
+ type: bool
+ old_name:
+ description:
+ - Specifies the old name of the provisioning policy during renaming.
+ - Valid when I(state=present) to rename an existing policy.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create provisioning policy
+ ibm.storage_virtualize.ibm_sv_manage_provisioning_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: provisioning_policy0
+ capacitysaving: "compressed"
+ deduplicated: true
+ state: present
+- name: Rename provisioning policy
+ ibm.storage_virtualize.ibm_sv_manage_provisioning_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: pp0
+ old_name: provisioning_policy0
+ state: present
+- name: Delete provisioning policy
+ ibm.storage_virtualize.ibm_sv_manage_provisioning_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: pp0
+ state: absent
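+# A sketch of a thin, non-deduplicated policy; the policy name is a placeholder.
+- name: Create thin provisioning policy
+  ibm.storage_virtualize.ibm_sv_manage_provisioning_policy:
+    clustername: "{{cluster}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    name: provisioning_policy1
+    capacitysaving: thin
+    state: present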
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger, strtobool
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVProvisioningPolicy:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ required=True
+ ),
+ capacitysaving=dict(
+ type='str',
+ choices=['drivebased', 'thin', 'compressed']
+ ),
+ deduplicated=dict(
+ type='bool',
+ default=False
+ ),
+ old_name=dict(
+ type='str',
+ ),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.capacitysaving = self.module.params.get('capacitysaving')
+ self.deduplicated = self.module.params.get('deduplicated', False)
+ self.old_name = self.module.params.get('old_name', '')
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+ self.pp_data = {}
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ if not self.name:
+ self.module.fail_json(
+ msg='Mandatory parameter missing: name'
+ )
+ else:
+ unsupported = ('capacitysaving', 'deduplicated', 'old_name')
+            unsupported_exists = ', '.join(field for field in unsupported if getattr(self, field))
+            if unsupported_exists:
+                self.module.fail_json(
+                    msg='state=absent but the following parameters were passed: {0}'.format(unsupported_exists)
+ )
+
+ def create_validation(self):
+ if self.old_name:
+ self.rename_validation([])
+
+ if not self.capacitysaving:
+ self.module.fail_json(
+ msg='Mandatory parameter missing: capacitysaving'
+ )
+
+ def rename_validation(self, updates):
+ if self.old_name and self.name:
+ if self.name == self.old_name:
+ self.module.fail_json(msg='New name and old name should be different.')
+
+ new = self.is_pp_exists()
+ existing = self.is_pp_exists(name=self.old_name)
+
+ if existing:
+ if new:
+ self.module.fail_json(
+ msg='Provisioning policy ({0}) already exists for the given new name'.format(self.name)
+ )
+ else:
+ updates.append('name')
+ else:
+ if not new:
+ self.module.fail_json(
+                    msg='Provisioning policy ({0}) does not exist for the given old name.'.format(self.old_name)
+ )
+ else:
+ self.module.exit_json(
+ msg='Provisioning policy ({0}) already renamed. No modifications done.'.format(self.name)
+ )
+
+ def is_pp_exists(self, name=None):
+ result = {}
+ name = name if name else self.name
+ cmd = 'lsprovisioningpolicy'
+ data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[name])
+
+ if isinstance(data, list):
+ for d in data:
+ result.update(d)
+ else:
+ result = data
+
+ self.pp_data = result
+
+ return result
+
+ def create_provisioning_policy(self):
+ self.create_validation()
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkprovisioningpolicy'
+ cmdopts = {
+ 'name': self.name,
+ 'capacitysaving': self.capacitysaving,
+ 'deduplicated': self.deduplicated
+ }
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Provisioning policy (%s) created', self.name)
+ self.changed = True
+
+ def provisioning_policy_probe(self):
+ updates = []
+ self.rename_validation(updates)
+ if self.capacitysaving:
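+            # A drive-based policy is reported by lsprovisioningpolicy with
+            # capacity_saving 'none', so normalize before comparing.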
+ capsav = 'none' if self.capacitysaving == 'drivebased' else self.capacitysaving
+ if capsav and capsav != self.pp_data.get('capacity_saving', ''):
+                self.module.fail_json(msg='Following parameter not applicable for update operation: capacitysaving')
+        if self.deduplicated and not strtobool(self.pp_data.get('deduplicated', 0)):
+            self.module.fail_json(msg='Following parameter not applicable for update operation: deduplicated')
+ return updates
+
+ def update_provisioning_policy(self, updates):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chprovisioningpolicy'
+ cmdopts = {
+ 'name': self.name
+ }
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.old_name])
+ self.log('Provisioning policy (%s) renamed', self.name)
+ self.changed = True
+
+ def delete_provisioning_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmprovisioningpolicy'
+ self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=[self.name])
+ self.changed = True
+
+ def apply(self):
+ if self.is_pp_exists(name=self.old_name):
+ if self.state == 'present':
+ modifications = self.provisioning_policy_probe()
+ if any(modifications):
+ self.update_provisioning_policy(modifications)
+ self.msg = 'Provisioning policy ({0}) updated'.format(self.name)
+ else:
+ self.msg = 'Provisioning policy ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_provisioning_policy()
+ self.msg = 'Provisioning policy ({0}) deleted'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Provisioning policy ({0}) does not exist.'.format(self.name)
+ else:
+ self.create_provisioning_policy()
+ self.msg = 'Provisioning policy ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVProvisioningPolicy()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_replication_policy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_replication_policy.py
new file mode 100644
index 000000000..5733d5552
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_replication_policy.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_replication_policy
+short_description: This module configures and manages replication policies on IBM Storage Virtualize family systems
+version_added: '1.10.0'
+description:
+ - Ansible interface to manage mkreplicationpolicy, chreplicationpolicy, and rmreplicationpolicy commands.
+ - This module manages policy based replication.
+ - This module can be run on all IBM Storage Virtualize systems with version 8.5.2.1 or later.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates, updates (C(present)), or deletes (C(absent)) a replication policy.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of the replication policy.
+ type: str
+ required: true
+ topology:
+ description:
+ - Specifies the policy topology.
+ choices: [ 2-site-async-dr, 2-site-ha ]
+ type: str
+ location1system:
+ description:
+ - Specifies the name or ID of the system in location 1 of the topology.
+ type: str
+ location1iogrp:
+ description:
+ - Specifies the ID of the I/O group of the system in location 1 of the topology.
+ type: int
+ location2system:
+ description:
+ - Specifies the name or ID of the system in location 2 of the topology.
+ type: str
+ location2iogrp:
+ description:
+ - Specifies the ID of the I/O group of the system in location 2 of the topology.
+ type: int
+ rpoalert:
+ description:
+ - Specifies the RPO alert threshold in seconds.
+ The minimum value is 60 (1 minute) and the maximum value is 86400 (1 day).
+ - The value must be a multiple of 60 seconds.
+ type: int
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create replication policy
+ ibm.storage_virtualize.ibm_sv_manage_replication_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: replication_policy0
+ topology: 2-site-async-dr
+ location1system: x.x.x.x
+ location1iogrp: 0
+ location2system: x.x.x.x
+ location2iogrp: 0
+ rpoalert: 60
+ state: present
+- name: Delete replication policy
+ ibm.storage_virtualize.ibm_sv_manage_replication_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: replication_policy0
+ state: absent
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVReplicationPolicy:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ required=True
+ ),
+ topology=dict(
+ type='str',
+ choices=['2-site-async-dr', '2-site-ha']
+ ),
+ location1system=dict(
+ type='str',
+ ),
+ location1iogrp=dict(
+ type='int',
+ ),
+ location2system=dict(
+ type='str',
+ ),
+ location2iogrp=dict(
+ type='int',
+ ),
+ rpoalert=dict(
+ type='int',
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.topology = self.module.params.get('topology', '')
+ self.location1system = self.module.params.get('location1system', '')
+ self.location1iogrp = self.module.params.get('location1iogrp', '')
+ self.location2system = self.module.params.get('location2system', '')
+ self.location2iogrp = self.module.params.get('location2iogrp', '')
+ self.rpoalert = self.module.params.get('rpoalert', '')
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ self.basic_checks()
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+ self.rp_data = {}
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(
+ msg='Missing mandatory parameter: name'
+ )
+
+ if self.state == 'absent':
+ invalids = ('topology', 'location1system', 'location1iogrp', 'location2system', 'location2iogrp', 'rpoalert')
+            invalid_exists = ', '.join(var for var in invalids if getattr(self, var) not in {'', None})
+
+ if invalid_exists:
+ self.module.fail_json(
+                    msg='state=absent but the following parameters have been passed: {0}'.format(invalid_exists)
+ )
+
+ def is_rp_exists(self):
+ result = {}
+ cmd = 'lsreplicationpolicy'
+ data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[self.name])
+
+ if isinstance(data, list):
+ for d in data:
+ result.update(d)
+ else:
+ result = data
+
+ self.rp_data = result
+
+ return result
+
+ def create_replication_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkreplicationpolicy'
+ cmdopts = {
+ 'name': self.name,
+ 'topology': self.topology,
+ 'location1system': self.location1system,
+ 'location1iogrp': self.location1iogrp,
+ 'location2system': self.location2system,
+ 'location2iogrp': self.location2iogrp,
+ 'rpoalert': self.rpoalert,
+ }
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Replication policy (%s) created', self.name)
+ self.changed = True
+
+ def replication_policy_probe(self):
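+        # Replication policies cannot be modified after creation; the probe
+        # only detects drift between requested and existing attributes and
+        # fails, asking for the policy to be deleted and recreated.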
+ field_mappings = (
+ ('topology', self.rp_data.get('topology', '')),
+ ('location1system', (
+ ('location1_system_name', self.rp_data.get('location1_system_name', '')),
+ ('location1_system_id', self.rp_data.get('location1_system_id', ''))
+ )),
+ ('location1iogrp', self.rp_data.get('location1_iogrp_id', '')),
+ ('location2system', (
+ ('location2_system_name', self.rp_data.get('location2_system_name', '')),
+ ('location2_system_id', self.rp_data.get('location2_system_id', ''))
+ )),
+ ('location2iogrp', self.rp_data.get('location2_iogrp_id', '')),
+ ('rpoalert', self.rp_data.get('rpo_alert', ''))
+ )
+
+ self.log('replication policy probe data: %s', field_mappings)
+ for f, v in field_mappings:
+ current_value = str(getattr(self, f))
+ if current_value and f in {'location1system', 'location2system'}:
+ try:
+ next(iter(filter(lambda val: val[1] == current_value, v)))
+ except StopIteration:
+ self.module.fail_json(
+ msg='Policy modification is not supported. '
+ 'Please delete and recreate new policy.'
+ )
+ elif current_value and f in {'rpoalert'}:
+ if self.topology == '2-site-ha':
+ continue
+ elif current_value and current_value != v:
+ self.module.fail_json(
+ msg='Policy modification is not supported. '
+ 'Please delete and recreate new policy.'
+ )
+
+ def delete_replication_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmreplicationpolicy'
+ self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=[self.name])
+ self.log('Replication policy (%s) deleted', self.name)
+ self.changed = True
+
+ def apply(self):
+ if self.is_rp_exists():
+ if self.state == 'present':
+ self.replication_policy_probe()
+ self.msg = 'Replication policy ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_replication_policy()
+ self.msg = 'Replication policy ({0}) deleted'.format(self.name)
+ else:
+ if self.state == 'absent':
+                self.msg = 'Replication policy ({0}) does not exist.'.format(self.name)
+ else:
+ self.create_replication_policy()
+ self.msg = 'Replication policy ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVReplicationPolicy()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_security.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_security.py
new file mode 100644
index 000000000..3d8effc02
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_security.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Sumit Kumar Gupta <sumit.gupta16@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ibm_sv_manage_security
+short_description: This module manages security options on IBM Storage Virtualize family storage systems
+description:
+ - Ansible interface to manage 'chsecurity' command.
+version_added: "2.1.0"
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize storage system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize storage system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize storage system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize storage system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize storage system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ sshprotocol:
+ description:
+ - Specifies the numeric value for the SSH security level setting in range 1 - 3.
+      - Level 1 allows the following key exchange methods
+ curve25519-sha256
+ curve25519-sha256@libssh.org
+ ecdh-sha2-nistp256
+ ecdh-sha2-nistp384
+ ecdh-sha2-nistp521
+ diffie-hellman-group-exchange-sha256
+ diffie-hellman-group16-sha512
+ diffie-hellman-group18-sha512
+ diffie-hellman-group14-sha256
+ diffie-hellman-group14-sha1
+ diffie-hellman-group1-sha1
+ diffie-hellman-group-exchange-sha1
+      - Level 2 allows the following key exchange methods
+ curve25519-sha256
+ curve25519-sha256@libssh.org
+ ecdh-sha2-nistp256
+ ecdh-sha2-nistp384
+ ecdh-sha2-nistp521
+ diffie-hellman-group-exchange-sha256
+ diffie-hellman-group16-sha512
+ diffie-hellman-group18-sha512
+ diffie-hellman-group14-sha256
+ diffie-hellman-group14-sha1
+      - Level 3 allows the following key exchange methods
+ curve25519-sha256
+ curve25519-sha256@libssh.org
+ ecdh-sha2-nistp256
+ ecdh-sha2-nistp384
+ ecdh-sha2-nistp521
+ diffie-hellman-group-exchange-sha256
+ diffie-hellman-group16-sha512
+ diffie-hellman-group18-sha512
+ diffie-hellman-group14-sha256
+ type: int
+ guitimeout:
+ description:
+ - Specifies the amount of time (in minutes) in range 5 - 240 before a session expires and the user is logged
+ out of the GUI for inactivity.
+ type: int
+ clitimeout:
+ description:
+ - Specifies the amount of time (in minutes) in range 5 - 240 before a session expires and the user is logged
+ out of the CLI for inactivity.
+ type: int
+ minpasswordlength:
+ description:
+      - Specifies the minimum length requirement in range 6 - 64 for user account passwords on the system.
+ type: int
+ passwordspecialchars:
+ description:
+      - Specifies the minimum number of special characters, in range 0 - 3, required in passwords for local users.
+ type: int
+ passworduppercase:
+ description:
+      - Specifies the minimum number of uppercase characters, in range 0 - 3, required in passwords for local users.
+ type: int
+ passwordlowercase:
+ description:
+      - Specifies the minimum number of lowercase characters, in range 0 - 3, required in passwords for local users.
+ type: int
+ passworddigits:
+ description:
+      - Specifies the minimum number of digits, in range 0 - 3, required in passwords for local users.
+ type: int
+ checkpasswordhistory:
+ description:
+ - Specifies whether the system prevents the user from reusing a previous password.
+ choices: ['yes', 'no']
+ type: str
+ maxpasswordhistory:
+ description:
+ - Specifies the number of previous passwords in range 0 - 10 to compare with if checkpasswordhistory is
+ enabled. A value of 0 means that the new password is compared with the current password only.
+ type: int
+ minpasswordage:
+ description:
+      - Specifies the minimum number of days between password changes in range 0 - 365. This setting is enforced if
+ checkpasswordhistory is enabled. This restriction is ignored if the password is expired. The setting does
+ nothing if the value is greater than the passwordexpiry value.
+ type: int
+ passwordexpiry:
+ description:
+ - Specifies the number of days in range 0 - 365 before a password expires. A value of 0 means the feature is
+ disabled and passwords do not expire.
+ type: int
+ expirywarning:
+ description:
+      - Specifies the number of days in range 0 - 30 before a password expires to raise a warning. The warning is
+ displayed on every CLI login until the password is changed. A value of 0 means that the feature is
+ disabled and warnings are not displayed.
+ type: int
+ superuserlocking:
+ description:
+ - Specifies whether the locking policy configured on the system also applies to the superuser. The value is
+ either enable or disable. This parameter is only supported on systems with a dedicated technician port.
+ choices: ['enable', 'disable']
+ type: str
+ maxfailedlogins:
+ description:
+      - Specifies the number of failed login attempts in range 0 - 10 before the user account is locked for the
+ amount of time that is specified in lockout period. A value of 0 means that the feature is disabled and
+ accounts are not locked out after failed login attempts.
+ type: int
+ lockoutperiod:
+ description:
+ - Specifies the number of minutes in range 0 - 10080 that a user is locked out for if the max failed logins
+ value is reached. A value of 0 implies the user is indefinitely locked out when the max failed login
+ attempts are reached.
+ type: int
+ restapitimeout:
+ description:
+ - Specifies token expiry time in minutes in the range 10 - 120.
+ type: int
+ superusermultifactor:
+ description:
+ - Specifies whether the superuser should be prompted for multifactor authentication.
+ choices: ['yes', 'no']
+ type: str
+ sshmaxtries:
+ description:
+      - Specifies the number of allowed login attempts (in range 1 - 10) per single SSH connection.
+ type: int
+ sshgracetime:
+ description:
+      - Specifies the duration of time in seconds, in range 15 - 1800, that a user has to enter login factors per
+        SSH connection before the connection is terminated.
+ type: int
+ superuserpasswordkeyrequired:
+ description:
+ - Specifies whether the superuser must provide both a password and SSH key for authentication.
+ type: str
+ choices: ['yes', 'no']
+ disablesuperusergui:
+ description:
+ - Specifies whether GUI access must be disabled for the superuser.
+ choices: ['yes', 'no']
+ type: str
+ disablesuperuserrest:
+ description:
+ - Specifies whether REST API access must be disabled for the superuser.
+ choices: ['yes', 'no']
+ type: str
+ disablesuperusercim:
+ description:
+ - Specifies whether CIMOM access must be disabled for the superuser.
+ choices: ['yes', 'no']
+ type: str
+ resetsshprotocol:
+ description:
+ - Resets the SSH protocol security level to the default value 3 and configures the system to automatically
+ follow the suggested level.
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+
+author:
+ - Sumit Kumar Gupta (@sumitguptaibm)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Change max failed login limit
+ ibm.storage_virtualize.ibm_sv_manage_security:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ maxfailedlogins: 5
+
+- name: Change SSH protocol level
+ ibm.storage_virtualize.ibm_sv_manage_security:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ sshprotocol: 2
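+
+# The following task is an illustrative sketch combining several of the documented
+# password-policy options; the specific values (length 8, one digit, one uppercase
+# character, 90-day expiry) are assumptions, not recommended settings.
+- name: Tighten the local password policy
+  ibm.storage_virtualize.ibm_sv_manage_security:
+    clustername: "{{cluster}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    log_path: /tmp/playbook.debug
+    minpasswordlength: 8
+    passworddigits: 1
+    passworduppercase: 1
+    passwordexpiry: 90
+    expirywarning: 7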
+'''
+
+RETURN = r'''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (IBMSVCRestApi,
+ svc_argument_spec,
+ get_logger)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVSecurityMgmt(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ sshprotocol=dict(type='int'),
+ guitimeout=dict(type='int'),
+ clitimeout=dict(type='int'),
+ minpasswordlength=dict(type='int', no_log=False),
+ passwordspecialchars=dict(type='int', no_log=False),
+ passworduppercase=dict(type='int', no_log=False),
+ passwordlowercase=dict(type='int', no_log=False),
+ passworddigits=dict(type='int', no_log=False),
+ checkpasswordhistory=dict(type='str', choices=['yes', 'no'], no_log=False),
+ maxpasswordhistory=dict(type='int', no_log=False),
+ minpasswordage=dict(type='int', no_log=False),
+ passwordexpiry=dict(type='int', no_log=False),
+ expirywarning=dict(type='int'),
+ superuserlocking=dict(type='str', choices=['enable', 'disable']),
+ maxfailedlogins=dict(type='int'),
+ lockoutperiod=dict(type='int'),
+ restapitimeout=dict(type='int'),
+ superusermultifactor=dict(type='str', choices=['yes', 'no']),
+ sshmaxtries=dict(type='int'),
+ sshgracetime=dict(type='int'),
+ superuserpasswordkeyrequired=dict(type='str', choices=['yes', 'no']),
+ disablesuperusergui=dict(type='str', choices=['yes', 'no']),
+ disablesuperuserrest=dict(type='str', choices=['yes', 'no']),
+ disablesuperusercim=dict(type='str', choices=['yes', 'no']),
+ resetsshprotocol=dict(type='bool')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ for param, value in self.module.params.items():
+ setattr(self, param, value)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Initialize changed variable
+ self.changed = False
+
+ # creating an instance of IBMSVCRestApi
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def change_security_settings(self):
+ cmd = 'chsecurity'
+ cmd_opts = {}
+
+        # Skip connection/bookkeeping attributes and any option the user did not set,
+        # so only genuine chsecurity parameters are passed to the command.
+        for attr, value in vars(self).items():
+            if value is None or attr in ['restapi', 'log', 'module', 'clustername', 'domain', 'username',
+                                         'password', 'validate_certs', 'token', 'log_path', 'changed']:
+                continue
+            cmd_opts[attr] = value
+
+        result = self.restapi.svc_run_command(cmd, cmd_opts, cmdargs=None)
+        if result == "":
+            self.changed = True
+            self.log("chsecurity completed successfully")
+        else:
+            self.module.fail_json(msg="chsecurity failed")
+
+ def apply(self):
+ msg = None
+ if self.module.check_mode:
+ self.changed = True
+ else:
+ self.change_security_settings()
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVSecurityMgmt()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshot.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshot.py
new file mode 100644
index 000000000..3012cc298
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshot.py
@@ -0,0 +1,630 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+# Sumit Kumar Gupta <sumit.gupta16@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_snapshot
+short_description: This module manages snapshots (PiT image of a volume) on IBM Storage Virtualize family systems
+version_added: '1.9.0'
+description:
+ - In this implementation, a snapshot is a mutually consistent image of the volumes
+ in a volume group or a list of independent volume(s).
+ - This Ansible module provides the interface to manage snapshots through 'addsnapshot',
+ 'chsnapshot' and 'rmsnapshot' Storage Virtualize commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates, updates (C(present)), restores from (C(restore)) or deletes (C(absent)) a snapshot.
+ choices: [ present, restore, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of a snapshot.
+ type: str
+ old_name:
+ description:
+ - Specifies the old name of a snapshot.
+ - Valid when I(state=present), to rename the existing snapshot.
+ type: str
+ src_volumegroup_name:
+ description:
+ - Specifies the name of the source volume group for which the snapshot is being created.
+ - I(src_volumegroup_name) and I(src_volume_names) are mutually exclusive for creating snapshot.
+      - One of I(src_volumegroup_name) or I(src_volume_names) is required for creation of a snapshot.
+ type: str
+ src_volume_names:
+ description:
+ - Specifies the name of the volumes for which the snapshots are to be created.
+      - A list of volume names can be specified with a colon as the delimiter.
+ - Valid when I(state=present), to create a snapshot.
+ type: str
+ snapshot_pool:
+ description:
+      - Specifies the name of the child pool within which the snapshot is being created.
+ type: str
+ ignorelegacy:
+ description:
+      - Specifies that the volume snapshots be added even if legacy FlashCopy mappings already use the volume as a source.
+ default: false
+ type: bool
+ ownershipgroup:
+ description:
+ - Specifies the name of the ownershipgroup.
+ - Valid when I(state=present), to update an existing snapshot.
+ type: str
+ safeguarded:
+ description:
+ - Flag to create a safeguarded snapshot.
+ - I(safeguarded) and I(retentiondays) are required together.
+ - Supported in SV build 8.5.2.0 or later.
+ type: bool
+ version_added: 1.10.0
+ retentiondays:
+ description:
+ - Specifies the retention period in days.
+ - I(safeguarded) and I(retentiondays) are required together.
+      - Applies when I(state=present), to create a safeguarded snapshot.
+ type: int
+ version_added: 1.10.0
+ retentionminutes:
+ description:
+ - Specifies the retention period in minutes in range 1 - 1440.
+ - I(retentionminutes) and I(retentiondays) are mutually exclusive.
+      - Applies when I(state=present), to create a transient snapshot.
+ type: int
+ version_added: 2.2.0
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+ - Sumit Kumar Gupta (@sumitguptaibm)
+notes:
+ - This module supports C(check_mode).
+  - This module automates the new Snapshot function, implemented by Storage Virtualize, which uses a
+    simplified management model. Any user requiring the flexibility available with legacy
+    FlashCopy can continue to use the existing module M(ibm.storage_virtualize.ibm_svc_manage_flashcopy).
+ - Snapshots created by this Ansible module are not directly accessible from the hosts.
+ To create a new group of host accessible volumes from a snapshot,
+ use M(ibm.storage_virtualize.ibm_svc_manage_volumegroup) module.
+'''
+
+EXAMPLES = '''
+- name: Create volumegroup snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: ansible_1
+ src_volumegroup_name: volumegroup1
+ snapshot_pool: Pool0Childpool0
+ state: present
+- name: Create volumes snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: ansible_2
+ src_volume_names: vdisk0:vdisk1
+ snapshot_pool: Pool0Childpool0
+ state: present
+- name: Create safeguarded snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: ansible_2
+ src_volume_names: vdisk0:vdisk1
+ safeguarded: true
+ retentiondays: 1
+ snapshot_pool: Pool0Childpool0
+ state: present
+- name: Update snapshot ansible_2
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: ansible_new
+ old_name: ansible_2
+ ownershipgroup: ownershipgroup0
+ state: present
+- name: Restore all volumes of a volumegroup from a snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: snapshot0
+ src_volumegroup_name: volumegroup1
+ snapshot_pool: Pool0Childpool0
+ state: restore
+- name: Restore subset of volumes of a volumegroup from snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: snapshot0
+ src_volumegroup_name: volumegroup1
+ src_volume_names: vdisk0:vdisk1
+ snapshot_pool: Pool0Childpool0
+ state: restore
+- name: Create transient snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: snapshot0
+ src_volume_names: vdisk0:vdisk1
+ safeguarded: true
+ retentionminutes: 5
+ snapshot_pool: Pool0Childpool0
+ state: present
+- name: Delete volumegroup snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: ansible_1
+ src_volumegroup_name: volumegroup1
+ state: absent
+- name: Delete volume snapshot
+ ibm.storage_virtualize.ibm_sv_manage_snapshot:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: ansible_new
+ state: absent
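+
+# Illustrative sketch only: shows the documented ignorelegacy flag for volumes that are
+# already the source of legacy FlashCopy mappings; snapshot and volume names are assumptions.
+- name: Create snapshot of volumes that have legacy FlashCopy mappings
+  ibm.storage_virtualize.ibm_sv_manage_snapshot:
+    clustername: '{{clustername}}'
+    username: '{{username}}'
+    password: '{{password}}'
+    name: ansible_3
+    src_volume_names: vdisk0:vdisk1
+    ignorelegacy: true
+    state: present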
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi,
+ svc_argument_spec,
+ strtobool,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+MIN_SNAPSHOT_RETENTION_MINUTES = 1
+MAX_SNAPSHOT_RETENTION_MINUTES = 1440
+
+
+class IBMSVSnapshot:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'restore', 'absent']
+ ),
+ name=dict(
+ type='str',
+ ),
+ old_name=dict(
+ type='str'
+ ),
+ snapshot_pool=dict(
+ type='str',
+ ),
+ src_volumegroup_name=dict(
+ type='str',
+ ),
+ src_volume_names=dict(
+ type='str',
+ ),
+ ignorelegacy=dict(
+ type='bool',
+ default=False
+ ),
+ ownershipgroup=dict(
+ type='str',
+ ),
+ safeguarded=dict(
+ type='bool'
+ ),
+ retentiondays=dict(
+ type='int',
+ ),
+ retentionminutes=dict(
+ type='int'
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+        # Default parameters
+ self.ignorelegacy = self.module.params['ignorelegacy']
+
+ # Optional parameters
+ self.old_name = self.module.params.get('old_name', '')
+ self.ownershipgroup = self.module.params.get('ownershipgroup', '')
+ self.snapshot_pool = self.module.params.get('snapshot_pool', '')
+ self.volumegroup = self.module.params.get('src_volumegroup_name', '')
+ self.volumes = self.module.params.get('src_volume_names', '')
+ self.safeguarded = self.module.params.get('safeguarded', False)
+ self.retentiondays = self.module.params.get('retentiondays')
+ self.retentionminutes = self.module.params.get('retentionminutes')
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+ self.parentuid = None
+ self.lsvg_data = {}
+ self.lsv_data = {}
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if not self.state:
+ self.module.fail_json(msg='Missing mandatory parameter: state')
+
+ if self.state == 'present':
+ if self.volumegroup and self.volumes:
+ self.module.fail_json(
+ msg='Mutually exclusive parameters: src_volumegroup_name, src_volume_names'
+ )
+ if self.retentionminutes is not None:
+ if (self.retentionminutes < MIN_SNAPSHOT_RETENTION_MINUTES or
+ self.retentionminutes > MAX_SNAPSHOT_RETENTION_MINUTES):
+ self.module.fail_json(
+                        msg='Invalid value for retentionminutes parameter. Valid range is 1 - 1440.'
+ )
+
+ elif self.state == 'restore':
+ # Check mandatory parameter src_volumegroup_name
+ if not self.volumegroup:
+ self.module.fail_json(
+ msg='Missing mandatory parameter src_volumegroup_name'
+ )
+ invalids = ('snapshot_pool', 'ignorelegacy', 'ownershipgroup',
+ 'old_name', 'safeguarded', 'retentiondays', 'retentionminutes')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var)))
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Invalid parameters for state=restore: {0}'.format(invalid_exists)
+ )
+
+ elif self.state == 'absent':
+ invalids = ('snapshot_pool', 'ignorelegacy', 'ownershipgroup',
+ 'old_name', 'safeguarded', 'retentiondays', 'retentionminutes')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var)))
+
+ if self.volumes:
+ invalid_exists = 'src_volume_names, {0}'.format(invalid_exists)
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Invalid parameters for state=absent: {0}'.format(invalid_exists)
+ )
+ else:
+            self.module.fail_json(msg='State should be one of present, restore or absent')
+
+ def create_validation(self):
+ if self.old_name:
+ self.rename_validation([])
+
+ if not self.volumegroup and not self.volumes:
+ self.module.fail_json(
+ msg='Either src_volumegroup_name or src_volume_names should be passed during snapshot creation.'
+ )
+
+ if self.ownershipgroup:
+ self.module.fail_json(
+ msg='`ownershipgroup` parameter is not supported during snapshot creation'
+ )
+
+ def rename_validation(self, updates):
+ if self.old_name and self.name:
+
+ if self.name == self.old_name:
+ self.module.fail_json(msg='New name and old name should be different.')
+
+ new = self.is_snapshot_exists()
+ existing = self.is_snapshot_exists(old_name=self.old_name)
+
+ if existing:
+ if new:
+ self.module.fail_json(
+ msg='Snapshot ({0}) already exists for the given new name.'.format(self.name)
+ )
+ else:
+ updates.append('name')
+ else:
+ if not new:
+ self.module.fail_json(
+                        msg='Snapshot ({0}) does not exist for the given old name.'.format(self.old_name)
+ )
+ else:
+ self.module.exit_json(
+ msg='Snapshot ({0}) already renamed. No modifications done.'.format(self.name)
+ )
+
+ def is_snapshot_exists(self, old_name=None, force=False):
+ old_name = old_name if old_name else self.name
+ if self.volumegroup:
+ data = self.lsvolumegroupsnapshot(old_name=old_name, force=force)
+ self.parentuid = data.get('parent_uid')
+ else:
+ if self.lsv_data.get('snapshot_name') == old_name and not force:
+ return self.lsv_data
+ cmdopts = {
+ "filtervalue": "snapshot_name={0}".format(old_name)
+ }
+ result = self.restapi.svc_obj_info(
+ cmd='lsvolumesnapshot',
+ cmdopts=cmdopts,
+ cmdargs=None
+ )
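+            # Snapshots of independent volumes are reported with an empty
+            # volume_group_name; pick the first such entry from the filtered result.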
+ try:
+ data = next(
+ filter(
+ lambda x: x['volume_group_name'] == '',
+ result
+ )
+ )
+ except StopIteration:
+ return {}
+ else:
+ self.lsv_data = data
+ self.parentuid = data.get('parent_uid')
+
+ return data
+
+ def lsvolumegroupsnapshot(self, force=False, old_name=None, parentuid=None):
+ old_name = old_name if old_name else self.name
+ if self.lsvg_data.get('name') == old_name and not force:
+ return self.lsvg_data
+
+ cmdopts = {
+ 'snapshot': old_name
+ }
+ if parentuid:
+ cmdopts['parentuid'] = self.parentuid
+ else:
+ cmdopts['volumegroup'] = self.volumegroup
+
+ data = {}
+ result = self.restapi.svc_obj_info(
+ cmd='lsvolumegroupsnapshot',
+ cmdopts=cmdopts,
+ cmdargs=None
+ )
+
+ if isinstance(result, list):
+ for res in result:
+ data = res
+ else:
+ data = result
+
+ self.lsvg_data = data
+
+ return data
+
+ def create_snapshot(self):
+ self.create_validation()
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'addsnapshot'
+ cmdopts = {
+ 'name': self.name
+ }
+
+ if self.snapshot_pool:
+ cmdopts['pool'] = self.snapshot_pool
+ if self.ignorelegacy:
+ cmdopts['ignorelegacy'] = self.ignorelegacy
+ if self.retentiondays:
+ cmdopts['retentiondays'] = self.retentiondays
+ if self.retentionminutes:
+ cmdopts['retentionminutes'] = self.retentionminutes
+ if self.safeguarded:
+ cmdopts['safeguarded'] = self.safeguarded
+
+ if self.volumegroup:
+ cmdopts['volumegroup'] = self.volumegroup
+ else:
+ cmdopts['volumes'] = self.volumes
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Snapshot (%s) created', self.name)
+ self.changed = True
+
+ def restore_from_snapshot(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'restorefromsnapshot'
+ cmdopts = {
+ 'snapshot': self.name
+ }
+ if self.volumegroup:
+ cmdopts['volumegroup'] = self.volumegroup
+ if self.volumes:
+ cmdopts['volumes'] = self.volumes
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.changed = True
+
+ def snapshot_probe(self):
+ updates = []
+ self.rename_validation(updates)
+ kwargs = dict((k, getattr(self, k)) for k in ['old_name', 'parentuid'] if getattr(self, k))
+ ls_data = self.lsvolumegroupsnapshot(**kwargs)
+
+ if self.ownershipgroup and ls_data['owner_name'] != self.ownershipgroup:
+ updates.append('ownershipgroup')
+
+ if self.safeguarded in {True, False} and self.safeguarded != strtobool(ls_data.get('safeguarded', 0)):
+ self.module.fail_json(
+                msg='Following parameter is not applicable for update operation: safeguarded'
+ )
+
+ self.log('Snapshot probe result: %s', updates)
+ return updates
+
+ def update_snapshot(self, updates):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ old_name = self.old_name if self.old_name else self.name
+ cmd = 'chsnapshot'
+ cmdopts = dict((k, getattr(self, k)) for k in updates)
+ cmdopts['snapshot'] = old_name
+
+ if self.volumegroup:
+ cmdopts['volumegroup'] = self.volumegroup
+ else:
+ cmdopts['parentuid'] = self.parentuid
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
+ self.changed = True
+
+ def delete_snapshot(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmsnapshot'
+ cmdopts = {
+ 'snapshot': self.name
+ }
+
+ if self.volumegroup:
+ cmdopts['volumegroup'] = self.volumegroup
+ else:
+ cmdopts['parentuid'] = self.parentuid
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
+ self.changed = True
+
+ still_exists = self.is_snapshot_exists(force=True)
+ if still_exists:
+ self.msg = 'Snapshot ({0}) will be in the dependent_delete '\
+ 'state until those dependencies are removed'.format(self.name)
+ else:
+ self.msg = 'Snapshot ({0}) deleted.'.format(self.name)
+
+ def apply(self):
+ if self.is_snapshot_exists(old_name=self.old_name):
+ if self.state == 'present':
+ modifications = self.snapshot_probe()
+ if any(modifications):
+ if self.retentionminutes is not None:
+ self.module.fail_json(msg='Invalid parameter retentionminutes for update operation')
+ self.update_snapshot(modifications)
+ self.msg = 'Snapshot ({0}) updated.'.format(self.name)
+ else:
+ self.msg = 'Snapshot ({0}) already exists. No modifications done.'.format(self.name)
+ elif self.state == 'restore':
+ self.restore_from_snapshot()
+ if self.volumes:
+ self.msg = 'Volumes ({0}) of Volumegroup ({1}) restored from Snapshot ({2}).'.\
+ format(self.volumes, self.volumegroup, self.name)
+ else:
+ self.msg = 'Volumegroup ({0}) restored from Snapshot ({1}).'.format(self.volumegroup, self.name)
+ else:
+ self.delete_snapshot()
+ else:
+ if self.state == 'absent':
+ self.msg = 'Snapshot ({0}) does not exist.'.format(self.name)
+ elif self.state == 'restore':
+ self.module.fail_json(msg='Snapshot ({0}) does not exist.'.format(self.name))
+ else:
+ self.create_snapshot()
+ self.msg = 'Snapshot ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVSnapshot()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshotpolicy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshotpolicy.py
new file mode 100644
index 000000000..dea2a5e73
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_snapshotpolicy.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_snapshotpolicy
+short_description: This module manages snapshot policy configuration on IBM Storage Virtualize family systems
+version_added: "1.9.0"
+description:
+ - Ansible interface to manage 'mksnapshotpolicy' and 'rmsnapshotpolicy' snapshot policy commands.
+  - Snapshot policy was introduced in IBM Storage Virtualize 8.5.1.0.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or deletes (C(absent)) a snapshot policy.
+ - Resume (C(resume)) or suspend (C(suspend)) the snapshot policy, system-wide.
+ choices: [ present, absent, suspend, resume ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies a unique name of the snapshot policy.
+ - Not applicable when I(state=suspend) or I(state=resume).
+ type: str
+ backupunit:
+ description:
+      - Specifies the unit of the backup interval.
+ - Applies when I(state=present).
+ choices: [ minute, hour, day, week, month ]
+ type: str
+ backupinterval:
+ description:
+ - Specifies the backup interval.
+ - Applies when I(state=present).
+ type: str
+ backupstarttime:
+ description:
+ - Specifies the start time of backup in the format YYMMDDHHMM.
+ - Applies when I(state=present).
+ type: str
+ retentiondays:
+ description:
+ - Specifies the retention days for the backup.
+ - Applies when I(state=present).
+ type: str
+ removefromvolumegroups:
+ description:
+ - Specify to remove the volume group association from the snapshot policy.
+ - Applies when I(state=absent).
+ - This option is allowed only for SecurityAdmin users.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Shilpi Jain(@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create snapshot policy
+ ibm.storage_virtualize.ibm_sv_manage_snapshotpolicy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: policy0
+ backupunit: day
+ backupinterval: 1
+ backupstarttime: 2102281800
+ retentiondays: 15
+ state: present
+- name: Suspend snapshot policy functionality
+ ibm.storage_virtualize.ibm_sv_manage_snapshotpolicy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ state: suspend
+- name: Resume snapshot policy functionality
+ ibm.storage_virtualize.ibm_sv_manage_snapshotpolicy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ state: resume
+- name: Delete snapshot policy
+ ibm.storage_virtualize.ibm_sv_manage_snapshotpolicy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: policy0
+ state: absent
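+
+# Illustrative sketch only: an hourly policy using the documented backupunit choices;
+# the interval and retention values here are assumptions.
+- name: Create snapshot policy with backups every six hours
+  ibm.storage_virtualize.ibm_sv_manage_snapshotpolicy:
+    clustername: "{{cluster}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    name: policy1
+    backupunit: hour
+    backupinterval: 6
+    backupstarttime: 2102281800
+    retentiondays: 7
+    state: present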
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCSnapshotPolicy:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent', 'suspend', 'resume']
+ ),
+ name=dict(
+ type='str',
+ ),
+ backupunit=dict(
+ type='str',
+ choices=['minute', 'hour', 'day', 'week', 'month'],
+ ),
+ backupinterval=dict(
+ type='str',
+ ),
+ backupstarttime=dict(
+ type='str',
+ ),
+ retentiondays=dict(
+ type='str',
+ ),
+ removefromvolumegroups=dict(
+ type='bool'
+ ),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+ self.backupunit = self.module.params.get('backupunit', '')
+ self.backupinterval = self.module.params.get('backupinterval', '')
+ self.backupstarttime = self.module.params.get('backupstarttime', '')
+ self.retentiondays = self.module.params.get('retentiondays', '')
+ self.removefromvolumegroups = self.module.params.get('removefromvolumegroups', False)
+
+ self.basic_checks()
+
+ # Variable to cache data
+ self.snapshot_policy_details = None
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
+ exists = list(filter(lambda x: not getattr(self, x), fields))
+
+ if any(exists):
+ self.module.fail_json(
+ msg="State is present but following parameters are missing: {0}".format(', '.join(exists))
+ )
+
+ if self.removefromvolumegroups:
+ self.module.fail_json(
+ msg="`removefromvolumegroups` parameter is not supported when state=present"
+ )
+ elif self.state == 'absent':
+ if not self.name:
+ self.module.fail_json(msg="Missing mandatory parameter: name")
+
+ fields = ['backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
+ exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
+
+ if any(exists):
+ self.module.fail_json(msg='{0} should not be passed when state=absent'.format(', '.join(exists)))
+ elif self.state in ['suspend', 'resume']:
+ fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
+ exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
+
+ if any(exists):
+ self.module.fail_json(msg='{0} should not be passed when state={1}'.format(', '.join(exists), self.state))
+
+ def policy_exists(self):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(
+ cmd='lssnapshotschedule',
+ cmdopts=None,
+ cmdargs=[self.name]
+ )
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ self.snapshot_policy_details = merged_result
+
+ return merged_result
+
+ def create_snapshot_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mksnapshotpolicy'
+ cmdopts = {
+ 'name': self.name,
+ 'backupstarttime': self.backupstarttime,
+ 'backupinterval': self.backupinterval,
+ 'backupunit': self.backupunit,
+ 'retentiondays': self.retentiondays
+ }
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Snapshot policy (%s) created', self.name)
+ self.changed = True
+
+ def snapshot_policy_probe(self):
+ field_mappings = (
+ ('backupinterval', self.snapshot_policy_details['backup_interval']),
+ ('backupstarttime', self.snapshot_policy_details['backup_start_time']),
+ ('retentiondays', self.snapshot_policy_details['retention_days']),
+ ('backupunit', self.snapshot_policy_details['backup_unit'])
+ )
+ updates = []
+
+ for field, existing_value in field_mappings:
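+            # The comparison below pads the user-supplied YYMMDDHHMM start time with
+            # '00', assuming lssnapshotschedule reports backup_start_time with seconds.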
+ if field == 'backupstarttime':
+ updates.append(existing_value != '{0}00'.format(getattr(self, field)))
+ else:
+ updates.append(existing_value != getattr(self, field))
+
+ return updates
+
+ def delete_snapshot_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmsnapshotpolicy'
+ cmdargs = [self.name]
+ cmdopts = None
+
+ if self.removefromvolumegroups:
+ cmdopts = {
+ 'removefromvolumegroups': True
+ }
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
+ self.log('Snapshot policy (%s) deleted', self.name)
+ self.changed = True
+
+ def update_snapshot_scheduler(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chsystem'
+ cmdopts = {'snapshotpolicysuspended': 'yes' if self.state == 'suspend' else 'no'}
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
+ self.log('Snapshot scheduler status changed: %s', self.state)
+ self.changed = True
+
+ def apply(self):
+ if self.state in ['resume', 'suspend']:
+ self.update_snapshot_scheduler()
+ self.msg = 'Snapshot scheduler {0}ed'.format(self.state.rstrip('e'))
+ else:
+ if self.policy_exists():
+ if self.state == 'present':
+ modifications = self.snapshot_policy_probe()
+ if any(modifications):
+                        self.msg = 'Policy modification is not supported in Ansible. Please delete and recreate the policy.'
+ else:
+ self.msg = 'Snapshot policy ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_snapshot_policy()
+ self.msg = 'Snapshot policy ({0}) deleted.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Snapshot policy ({0}) does not exist. No modifications done.'.format(self.name)
+ else:
+ self.create_snapshot_policy()
+ self.msg = 'Snapshot policy ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVCSnapshotPolicy()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ssl_certificate.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ssl_certificate.py
new file mode 100644
index 000000000..008faa12c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_ssl_certificate.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_ssl_certificate
+short_description: This module exports existing system-signed certificate on to IBM Storage Virtualize family systems
+version_added: '1.10.0'
+description:
+ - Only existing system-signed certificates can be exported. External authority certificate generation is not supported.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when the hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ certificate_type:
+ description:
+ - Specify the certificate type to be exported.
+ choices: [ 'system' ]
+ default: 'system'
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M(@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Export SSL certificate internally
+ ibm.storage_virtualize.ibm_sv_manage_ssl_certificate:
+ clustername: "x.x.x.x"
+ username: "username"
+ password: "password"
+ certificate_type: "system"
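+
+# Illustrative sketch only: the documentation notes that a token can be generated with the
+# ibm.storage_virtualize.ibm_svc_auth module; registering the result as 'auth' and reading
+# 'auth.token' is an assumption for illustration.
+- name: Obtain an authentication token
+  ibm.storage_virtualize.ibm_svc_auth:
+    clustername: "x.x.x.x"
+    username: "username"
+    password: "password"
+  register: auth
+- name: Export SSL certificate using the token
+  ibm.storage_virtualize.ibm_sv_manage_ssl_certificate:
+    clustername: "x.x.x.x"
+    token: "{{ auth.token }}"
+    certificate_type: "system"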
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVSSLCertificate:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ certificate_type=dict(
+ type='str',
+ choices=['system'],
+ default='system'
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ # Default parameters
+ self.certificate_type = self.module.params['certificate_type']
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def export_cert(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.restapi.svc_run_command('chsystemcert', cmdopts=None, cmdargs=['-export'])
+ self.log('Certificate exported')
+ self.changed = True
+
+ def apply(self):
+ if self.certificate_type == 'system':
+ self.export_cert()
+ self.msg = 'Certificate exported.'
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVSSLCertificate()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_storage_partition.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_storage_partition.py
new file mode 100644
index 000000000..a4e0f423c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_storage_partition.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_storage_partition
+short_description: This module manages storage partition on IBM Storage Virtualize family systems
+version_added: '2.1.0'
+description:
+  - This Ansible module provides the interface to manage storage partitions through 'mkpartition',
+    'chpartition' and 'rmpartition' Storage Virtualize commands.
+ - The Policy based High Availability (HA) solution uses Storage Partitions. These partitions contain volumes,
+ volume groups, host and host-to-volume mappings.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates, updates (C(present)) or deletes (C(absent)) a storage partition.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of a storage partition.
+ type: str
+ required: true
+ replicationpolicy:
+ description:
+ - Specifies the replication policy for the storage partition.
+ type: str
+ noreplicationpolicy:
+ description:
+      - Unassigns the current replication policy from the storage partition. This parameter, if used without the
+        I(deletepreferredmanagementcopy) parameter, is allowed only on the active management system.
+ type: bool
+ preferredmanagementsystem:
+ description:
+ - Changes the preferred management system for the storage partition.
+ - Permitted only from the system which is the active management system.
+ type: str
+ deletepreferredmanagementcopy:
+ description:
+ - This parameter is to be used along with I(noreplicationpolicy) parameter and active management system
+ must NOT be the same as the preferred management system.
+ type: bool
+ deletenonpreferredmanagementobjects:
+ description:
+      - If the storage partition has a replication policy and associated objects, such as volumes, volume groups, hosts or host mappings,
+        one of the two parameters I(deletenonpreferredmanagementobjects) or I(deletepreferredmanagementobjects) is required. If specified,
+        the command is only permitted on the active management system, and requires that the active management system is the same as the
+        preferred management system.
+ - Applies when I(state=absent).
+ type: bool
+ deletepreferredmanagementobjects:
+ description:
+      - If the storage partition has a replication policy and associated objects, such as volumes, volume groups, hosts or host mappings,
+        one of the two parameters I(deletenonpreferredmanagementobjects) or I(deletepreferredmanagementobjects) is required. If the storage
+        partition cannot be managed from the preferred management system, then I(deletepreferredmanagementobjects) must be used to remove the
+        storage partition and unassign the replication policy.
+ - Applies when I(state=absent).
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Shilpi Jain (@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create Storage Partition
+ ibm.storage_virtualize.ibm_sv_manage_storage_partition:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: partition1
+ state: present
+ replicationpolicy: ha_policy_1
+- name: Delete the storage partition
+ ibm.storage_virtualize.ibm_sv_manage_storage_partition:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: partition1
+ state: absent
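+
+# Illustrative sketch only: unassigns the replication policy as described in the option
+# documentation; per that documentation this form is allowed only on the active
+# management system, and the partition name is an assumption.
+- name: Remove the replication policy from the storage partition
+  ibm.storage_virtualize.ibm_sv_manage_storage_partition:
+    clustername: '{{clustername}}'
+    username: '{{username}}'
+    password: '{{password}}'
+    name: partition1
+    noreplicationpolicy: true
+    state: present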
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi,
+ svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVStoragePartition:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ replicationpolicy=dict(
+ type='str'
+ ),
+ noreplicationpolicy=dict(
+ type='bool'
+ ),
+ preferredmanagementsystem=dict(
+ type='str'
+ ),
+ deletepreferredmanagementcopy=dict(
+ type='bool'
+ ),
+ deletenonpreferredmanagementobjects=dict(
+ type='bool'
+ ),
+ deletepreferredmanagementobjects=dict(
+ type='bool'
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.replicationpolicy = self.module.params.get('replicationpolicy', '')
+ self.noreplicationpolicy = self.module.params.get('noreplicationpolicy', '')
+ self.preferredmanagementsystem = self.module.params.get('preferredmanagementsystem', '')
+ self.deletepreferredmanagementcopy = self.module.params.get('deletepreferredmanagementcopy', '')
+ self.deletenonpreferredmanagementobjects = self.module.params.get('deletenonpreferredmanagementobjects', '')
+ self.deletepreferredmanagementobjects = self.module.params.get('deletepreferredmanagementobjects', '')
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.basic_checks()
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.state == 'present':
+ if self.deletenonpreferredmanagementobjects or self.deletepreferredmanagementobjects:
+ self.module.fail_json(
+                msg='Parameters not allowed during creation or update: '
+ 'deletenonpreferredmanagementobjects, deletepreferredmanagementobjects'
+ )
+ else:
+ if self.replicationpolicy or self.noreplicationpolicy or self.preferredmanagementsystem or self.deletepreferredmanagementcopy:
+ self.module.fail_json(
+                    msg='Parameters not allowed during deletion: replicationpolicy, noreplicationpolicy, preferredmanagementsystem, '
+ 'deletepreferredmanagementcopy'
+ )
+
+ def get_storage_partition_details(self, name):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lspartition', cmdopts=None, cmdargs=[name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def create_storage_partition(self):
+ unsupported = ('noreplicationpolicy', 'preferredmanagementsystem', 'deletepreferredmanagementcopy')
+ unsupported_exists = ', '.join((field for field in unsupported if getattr(self, field) not in {'', None}))
+
+ if unsupported_exists:
+ self.module.fail_json(
+                msg='Parameters not supported during creation: {0}'.format(unsupported_exists)
+ )
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkpartition'
+ cmdopts = {
+ 'name': self.name
+ }
+
+ if self.replicationpolicy:
+ cmdopts['replicationpolicy'] = self.replicationpolicy
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Storage Partition (%s) created', self.name)
+ self.changed = True
+
+ def partition_probe(self, data):
+ if self.replicationpolicy and self.noreplicationpolicy:
+ self.module.fail_json(msg='Mutual exclusive parameters: {0}, {1}'.format("replicationpolicy", "noreplicationpolicy"))
+ if self.replicationpolicy and self.preferredmanagementsystem:
+ self.module.fail_json(msg='Mutual exclusive parameters: {0}, {1}'.format("replicationpolicy", "preferredmanagementsystem"))
+ if self.deletepreferredmanagementcopy and not self.noreplicationpolicy:
+ self.module.fail_json(msg='These parameters must be passed together: {0}, {1}'.format("deletepreferredmanagementcopy", "noreplicationpolicy"))
+
+        # Mapping the parameters with the existing data for comparison
+ params_mapping = (
+ ('replicationpolicy', data.get('replication_policy_name', '')),
+ ('preferredmanagementsystem', data.get('preferred_management_system_name', '')),
+ ('noreplicationpolicy', not bool(data.get('replication_policy_name', '')))
+ )
+
+ props = dict((k, getattr(self, k)) for k, v in params_mapping if getattr(self, k) and getattr(self, k) != v)
+
+        # props is a dict keyed by option name, so check membership by name and add
+        # the companion flag as an entry.
+        if 'noreplicationpolicy' in props and self.deletepreferredmanagementcopy:
+            props['deletepreferredmanagementcopy'] = self.deletepreferredmanagementcopy
+
+ self.log("Storage Partition props = %s", props)
+
+ return props
+
+ def update_storage_partition(self, updates):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chpartition'
+ cmdopts = dict((k, getattr(self, k)) for k in updates)
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
+ self.changed = True
+
+ def delete_storage_partition(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmpartition'
+ cmdopts = {}
+ if self.deletenonpreferredmanagementobjects:
+ cmdopts['deletenonpreferredmanagementobjects'] = self.deletenonpreferredmanagementobjects
+ if self.deletepreferredmanagementobjects:
+ cmdopts['deletepreferredmanagementobjects'] = self.deletepreferredmanagementobjects
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=[self.name])
+ self.changed = True
+
+ def apply(self):
+ data = self.get_storage_partition_details(self.name)
+
+ if data:
+ if self.state == 'present':
+ modifications = self.partition_probe(data)
+ if modifications:
+ self.update_storage_partition(modifications)
+ self.msg = 'Storage Partition ({0}) updated'.format(self.name)
+ else:
+ self.msg = 'Storage Partition ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_storage_partition()
+ self.msg = 'Storage Partition ({0}) deleted.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Storage Partition ({0}) does not exist'.format(self.name)
+ else:
+ self.create_storage_partition()
+ self.msg = 'Storage Partition ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVStoragePartition()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_syslog_server.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_syslog_server.py
new file mode 100644
index 000000000..0f18b784a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_syslog_server.py
@@ -0,0 +1,462 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_syslog_server
+short_description: This module manages syslog server on IBM Storage Virtualize family systems
+version_added: '2.1.0'
+description:
+ - This Ansible module provides the interface to manage syslog servers through 'mksyslogserver',
+ 'chsyslogserver' and 'rmsyslogserver' Storage Virtualize commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates, updates (C(present)) or deletes (C(absent)) a syslog server.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of a syslog server.
+ type: str
+ required: true
+ old_name:
+ description:
+ - Specifies the old name of a syslog server to rename.
+ - Valid when I(state=present), to rename the existing syslog server.
+ type: str
+ ip:
+ description:
+ - Specifies the Internet Protocol (IP) address or domain name of the syslog server.
+ If a domain name is specified, a DNS server must be configured on the system.
+ type: str
+ facility:
+ description:
+ - Specifies the facility number used in syslog messages.
+ This number identifies the origin of the message to the receiving server. The default value is 0.
+        The parameters I(facility) and I(cadf) are mutually exclusive.
+ type: int
+ error:
+ description:
+ - Specifies whether the server receives error notifications.
+ If specified as on, error notifications are sent to the syslog server. The default value is on.
+ type: str
+ choices: [ 'off', 'on' ]
+ warning:
+ description:
+ - Specifies whether the server receives warning notifications.
+ If specified as on, warning notifications are sent to the syslog server. The default value is on.
+ type: str
+ choices: [ 'off', 'on' ]
+ info:
+ description:
+ - Specifies whether the server receives information notifications.
+ If specified as on, information notifications are sent to the syslog server. The default value is on.
+ type: str
+ choices: [ 'off', 'on' ]
+ audit:
+ description:
+ - Specifies whether the server receives CLI audit logs. The default value is off.
+ type: str
+ choices: [ 'off', 'on' ]
+ login:
+ description:
+ - Specifies whether the server receives authentication logs. The default value is off.
+ type: str
+ choices: [ 'off', 'on' ]
+ protocol:
+ description:
+ - Specifies the communication protocol that is used by this server. The default value is udp.
+ type: str
+ choices: [ tcp, udp ]
+ port:
+ description:
+ - Specifies the communication port that is used by this server.
+ The parameter I(protocol) must be specified while specifying this parameter.
+ The default value is 514 for udp and 6514 for tcp.
+ type: int
+ cadf:
+ description:
+ - Specifies that Cloud Auditing Data Federation (CADF) data reporting be turned on or off.
+        The parameters I(facility) and I(cadf) are mutually exclusive.
+ type: str
+ choices: [ 'off', 'on' ]
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Shilpi Jain (@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create syslog server
+ ibm.storage_virtualize.ibm_sv_manage_syslog_server:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: server1
+ ip: 1.2.3.4
+ state: present
+- name: Modify the server details
+ ibm.storage_virtualize.ibm_sv_manage_syslog_server:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: server1
+    info: "off"
+ state: present
+- name: Delete the syslog server
+ ibm.storage_virtualize.ibm_sv_manage_syslog_server:
+ clustername: '{{clustername}}'
+ username: '{{username}}'
+ password: '{{password}}'
+ name: server1
+ state: absent
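+
+# Illustrative sketch only: creates a TCP syslog server on the documented default TCP
+# port 6514, then renames it via old_name; server names and address are assumptions.
+- name: Create syslog server using TCP
+  ibm.storage_virtualize.ibm_sv_manage_syslog_server:
+    clustername: '{{clustername}}'
+    username: '{{username}}'
+    password: '{{password}}'
+    name: server2
+    ip: 1.2.3.5
+    protocol: tcp
+    port: 6514
+    state: present
+- name: Rename the syslog server
+  ibm.storage_virtualize.ibm_sv_manage_syslog_server:
+    clustername: '{{clustername}}'
+    username: '{{username}}'
+    password: '{{password}}'
+    name: server2_new
+    old_name: server2
+    state: present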
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi,
+ svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVSyslogserver:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ old_name=dict(
+ type='str'
+ ),
+ ip=dict(
+ type='str'
+ ),
+ facility=dict(
+ type='int'
+ ),
+ error=dict(
+ type='str',
+ choices=['off', 'on']
+ ),
+ warning=dict(
+ type='str',
+ choices=['off', 'on']
+ ),
+ info=dict(
+ type='str',
+ choices=['off', 'on']
+ ),
+ audit=dict(
+ type='str',
+ choices=['off', 'on']
+ ),
+ login=dict(
+ type='str',
+ choices=['off', 'on']
+ ),
+ protocol=dict(
+ type='str',
+ choices=['tcp', 'udp']
+ ),
+ port=dict(
+ type='int'
+ ),
+ cadf=dict(
+ type='str',
+ choices=['off', 'on']
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.old_name = self.module.params.get('old_name', '')
+ self.ip = self.module.params.get('ip', '')
+ self.facility = self.module.params.get('facility', '')
+ self.error = self.module.params.get('error', '')
+ self.warning = self.module.params.get('warning', '')
+ self.info = self.module.params.get('info', '')
+ self.audit = self.module.params.get('audit', '')
+ self.login = self.module.params.get('login', '')
+ self.protocol = self.module.params.get('protocol', '')
+ self.port = self.module.params.get('port', '')
+ self.cadf = self.module.params.get('cadf', '')
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ self.basic_checks()
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.state == 'present':
+ if self.facility is not None and self.cadf is not None:
+ self.module.fail_json(
+ msg='Mutually exclusive parameters: facility, cadf'
+ )
+ if not self.protocol and self.port:
+ self.module.fail_json(
+ msg='These parameters are required together: protocol, port'
+ )
+ if self.old_name:
+ unsupported = ('ip', 'facility', 'error', 'warning', 'info', 'login', 'audit', 'protocol', 'port', 'cadf')
+ unsupported_exists = ', '.join((var for var in unsupported if getattr(self, var) not in {'', None}))
+
+ if unsupported_exists:
+ self.module.fail_json(
+ msg='The following parameters are not supported while renaming: {0}'.format(unsupported_exists)
+ )
+ elif self.state == 'absent':
+ invalids = ('ip', 'facility', 'error', 'warning', 'info', 'login', 'audit', 'protocol', 'port', 'cadf', 'old_name')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='state=absent but the following parameters have been passed: {0}'.format(invalid_exists)
+ )
+
+ def get_syslog_server_details(self, server_name):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lssyslogserver', cmdopts=None, cmdargs=[server_name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def create_server(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mksyslogserver'
+ cmdopts = {
+ 'name': self.name
+ }
+
+ if self.ip:
+ cmdopts['ip'] = self.ip
+ if self.facility is not None:
+ cmdopts['facility'] = self.facility
+ if self.error:
+ cmdopts['error'] = self.error
+ if self.warning:
+ cmdopts['warning'] = self.warning
+ if self.info:
+ cmdopts['info'] = self.info
+ if self.audit:
+ cmdopts['audit'] = self.audit
+ if self.login:
+ cmdopts['login'] = self.login
+ if self.protocol:
+ cmdopts['protocol'] = self.protocol
+ if self.port:
+ cmdopts['port'] = self.port
+ if self.cadf:
+ cmdopts['cadf'] = self.cadf
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Syslog server (%s) created', self.name)
+ self.changed = True
+
+ def server_probe(self, server_data):
+ updates = []
+
+ if self.ip and server_data['IP_address'] != self.ip:
+ updates.append('ip')
+ if self.facility is not None and server_data['facility'] != self.facility:
+ updates.append('facility')
+ if self.error and server_data['error'] != self.error:
+ updates.append('error')
+ if self.warning and server_data['warning'] != self.warning:
+ updates.append('warning')
+ if self.info and server_data['info'] != self.info:
+ updates.append('info')
+ if self.audit and server_data['audit'] != self.audit:
+ updates.append('audit')
+ if self.login and server_data['login'] != self.login:
+ updates.append('login')
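+ # chsyslogserver requires -protocol whenever -port is changed (enforced in
+ # basic_checks), so a port change schedules a protocol update as well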
+ if self.port is not None:
+ if int(server_data['port']) != self.port:
+ updates.append('port')
+ updates.append('protocol')
+ if self.protocol and server_data['protocol'] != self.protocol:
+ updates.append('protocol')
+ if self.cadf and server_data['cadf'] != self.cadf:
+ updates.append('cadf')
+
+ self.log('Syslogserver probe result: %s', updates)
+ return updates
+
+ def rename_server(self, server_data):
+ msg = ''
+ old_name_data = self.get_syslog_server_details(self.old_name)
+ if not old_name_data and not server_data:
+ self.module.fail_json(msg="Syslog server with old name {0} doesn't exist.".format(self.old_name))
+ elif old_name_data and server_data:
+ self.module.fail_json(msg="Syslog server [{0}] already exists.".format(self.name))
+ elif not old_name_data and server_data:
+ msg = "Syslog server with name [{0}] already exists.".format(self.name)
+ elif old_name_data and not server_data:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chsyslogserver', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "Syslog server [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def update_server(self, updates):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chsyslogserver'
+ cmdopts = dict((k, getattr(self, k)) for k in updates)
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
+ self.changed = True
+
+ def delete_server(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.restapi.svc_run_command('rmsyslogserver', None, [self.name])
+ self.changed = True
+
+ def apply(self):
+ server_data = self.get_syslog_server_details(self.name)
+
+ if self.state == 'present' and self.old_name:
+ self.msg = self.rename_server(server_data)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ if server_data:
+ if self.state == 'present':
+ modifications = self.server_probe(server_data)
+ if any(modifications):
+ self.update_server(modifications)
+ self.msg = 'Syslog server ({0}) updated.'.format(self.name)
+ else:
+ self.msg = 'Syslog server ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_server()
+ self.msg = 'Syslog server ({0}) deleted successfully.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Syslog server ({0}) does not exist. No modifications done.'.format(self.name)
+ else:
+ self.create_server()
+ self.msg = 'Syslog server ({0}) created successfully.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVSyslogserver()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_truststore_for_replication.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_truststore_for_replication.py
new file mode 100644
index 000000000..e7bb2e0e6
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_manage_truststore_for_replication.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_manage_truststore_for_replication
+short_description: This module manages certificate trust stores for replication on
+ IBM Storage Virtualize family systems
+version_added: '1.10.0'
+description:
+ - Ansible interface to manage mktruststore and rmtruststore commands.
+ - This module transfers the certificate from a remote system to the local system.
+ - This module works on SSH and uses paramiko to establish an SSH connection.
+ - Once transfer is done successfully, it also adds the certificate to the trust store of the local system.
+ - This module can be used to set up mutual TLS (mTLS) for policy-based replication inter-system communication
+ using cluster endpoint certificates (usually system-signed which are exported by the
+ M(ibm.storage_virtualize.ibm_sv_manage_ssl_certificate) module).
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ username:
+ description:
+ - Username for the Storage Virtualize system.
+ type: str
+ required: true
+ password:
+ description:
+ - Password for the Storage Virtualize system.
+ - Mandatory, when I(usesshkey=no).
+ type: str
+ usesshkey:
+ description:
+ - For key-pair based SSH connection, set this field as "yes".
+ Provide full path of key in key_filename field.
+ If not provided, default path of SSH key is used.
+ type: str
+ choices: [ 'yes', 'no']
+ default: 'no'
+ key_filename:
+ description:
+ - SSH client private key filename. By default, ~/.ssh/id_rsa is used.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or deletes (C(absent)) a trust store.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of the trust store.
+ - If not specified, the module generates a name automatically with format store_I(remote_clustername).
+ type: str
+ remote_clustername:
+ description:
+ - Specifies the name of the partner remote cluster with which the mTLS partnership needs to be set up.
+ type: str
+ required: true
+ remote_username:
+ description:
+ - Username for remote cluster.
+ - Applies when I(state=present) to create a trust store.
+ type: str
+ remote_password:
+ description:
+ - Password for remote cluster.
+ - Applies when I(state=present) to create a trust store.
+ type: str
+author:
+ - Sanjaikumaar M(@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create truststore
+ ibm.storage_virtualize.ibm_sv_manage_truststore_for_replication:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "{{name}}"
+ remote_clustername: "{{remote_clustername}}"
+ remote_username: "{{remote_username}}"
+ remote_password: "{{remote_password}}"
+ log_path: "{{log_path}}"
+ state: "present"
+- name: Delete truststore
+ ibm.storage_virtualize.ibm_sv_manage_truststore_for_replication:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: "{{name}}"
+ remote_clustername: "{{remote_clustername}}"
+ log_path: "{{log_path}}"
+ state: "absent"
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ svc_ssh_argument_spec,
+ get_logger
+)
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
+from ansible.module_utils._text import to_native
+
+
+class IBMSVTrustStore:
+
+ def __init__(self):
+ argument_spec = svc_ssh_argument_spec()
+ argument_spec.update(
+ dict(
+ password=dict(
+ type='str',
+ required=False,
+ no_log=True
+ ),
+ name=dict(
+ type='str'
+ ),
+ usesshkey=dict(
+ type='str',
+ default='no',
+ choices=['yes', 'no']
+ ),
+ key_filename=dict(
+ type='str',
+ ),
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ required=True
+ ),
+ remote_clustername=dict(
+ type='str',
+ required=True
+ ),
+ remote_username=dict(
+ type='str',
+ ),
+ remote_password=dict(
+ type='str',
+ no_log=True
+ ),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Required parameters
+ self.state = self.module.params['state']
+ self.remote_clustername = self.module.params['remote_clustername']
+
+ # local SSH keys will be used in case of passwordless SSH connection
+ self.usesshkey = self.module.params['usesshkey']
+ self.key_filename = self.module.params['key_filename']
+
+ # Optional parameters
+ self.password = self.module.params.get('password', '')
+ self.name = self.module.params.get('name', '')
+ self.remote_username = self.module.params.get('remote_username', '')
+ self.remote_password = self.module.params.get('remote_password', '')
+
+ if not self.name:
+ self.name = 'store_{0}'.format(self.remote_clustername)
+
+ if not self.password:
+ if self.usesshkey == 'yes':
+ self.log("password is none and use ssh private key. Check for its path")
+ if self.key_filename:
+ self.log("key file_name is provided, use it")
+ self.look_for_keys = True
+ else:
+ self.log("key file_name is not provided, use default one, ~/.ssh/id_rsa.pub")
+ self.look_for_keys = True
+ else:
+ self.log("password is none and SSH key is not provided")
+ self.module.fail_json(msg="You must pass either password or usesshkey parameter.")
+ else:
+ self.log("password is given")
+ self.look_for_keys = False
+
+ self.basic_checks()
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.ssh_client = IBMSVCssh(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ username=self.module.params['username'],
+ password=self.password,
+ look_for_keys=self.look_for_keys,
+ key_filename=self.key_filename,
+ log_path=self.log_path
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ if not self.remote_clustername:
+ self.module.fail_json(
+ msg='Missing mandatory parameter: remote_clustername'
+ )
+ if not self.remote_username:
+ self.module.fail_json(
+ msg='Missing mandatory parameter: remote_username'
+ )
+ if not self.remote_password:
+ self.module.fail_json(
+ msg='Missing mandatory parameter: remote_password'
+ )
+ elif self.state == 'absent':
+ if not self.remote_clustername:
+ self.module.fail_json(
+ msg='Missing mandatory parameter: remote_clustername'
+ )
+
+ unsupported = ('remote_username', 'remote_password')
+ unsupported_exists = ', '.join((field for field in unsupported if getattr(self, field)))
+ if unsupported_exists:
+ self.module.fail_json(
+ msg='state=absent but the following parameters have been passed: {0}'.format(unsupported_exists)
+ )
+
+ def raise_error(self, stderr):
+ message = stderr.read().decode('utf-8')
+ if len(message) > 0:
+ self.log("%s", message)
+ self.module.fail_json(msg=message)
+ else:
+ message = 'Unknown error received.'
+ self.module.fail_json(msg=message)
+
+ def is_truststore_exists(self):
+ merged_result = {}
+ cmd = 'lstruststore -json {0}'.format(self.name)
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
+ result = stdout.read().decode('utf-8')
+
+ if result:
+ result = json.loads(result)
+ else:
+ return merged_result
+
+ rc = stdout.channel.recv_exit_status()
+ if rc > 0:
+ message = stderr.read().decode('utf-8')
+ # Treat the known CMMVC5804E/CMMVC6035E responses as expected; fail on anything else
+ if (message.count('CMMVC5804E') != 1) and (message.count('CMMVC6035E') != 1):
+ self.log("Error in executing CLI command: %s", cmd)
+ self.log("%s", message)
+ self.module.fail_json(msg=message)
+ else:
+ self.log("Expected error: %s", message)
+
+ if isinstance(result, list):
+ for d in result:
+ merged_result.update(d)
+ else:
+ merged_result = result
+
+ return merged_result
+
+ def download_file(self):
+ if self.module.check_mode:
+ return
+
+ cmd = 'scp -o stricthostkeychecking=no {0}@{1}:/dumps/certificate.pem /upgrade/'.format(
+ self.remote_username,
+ self.remote_clustername
+ )
+ self.log('Command to be executed: %s', cmd)
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd, get_pty=True, timeout=60 * 1.5)
+ result = ''
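+ # Interactive scp over a PTY: watch the channel for the remote password
+ # prompt, answer it once, and break out on any other output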
+ while not stdout.channel.recv_ready():
+ data = stdout.channel.recv(1024)
+ self.log(str(data, 'utf-8'))
+ if data:
+ if b'Password:' in data or b'password' in data:
+ stdin.write("{0}\n".format(self.remote_password))
+ stdin.flush()
+ else:
+ result += data.decode('utf-8')
+ break
+
+ result += stdout.read().decode('utf-8')
+ rc = stdout.channel.recv_exit_status()
+ if rc > 0:
+ message = stderr.read().decode('utf-8')
+ self.log("Error in executing command: %s", cmd)
+ if len(message) <= 1:
+ if len(result) > 1:
+ err = result.replace('\rPassword:\r\n', '')
+ self.log("Error: %s", err)
+ if err:
+ self.module.fail_json(msg=err)
+ self.module.fail_json(msg='Unknown error received')
+ else:
+ self.module.fail_json(msg=message)
+ else:
+ self.log(result)
+
+ def create_truststore(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mktruststore -name {0} -file {1}'.format(self.name, '/upgrade/certificate.pem')
+ self.log('Command to be executed: %s', cmd)
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
+ result = stdout.read().decode('utf-8')
+ rc = stdout.channel.recv_exit_status()
+
+ if rc > 0:
+ self.log("Error in executing command: %s", cmd)
+ self.raise_error(stderr)
+ else:
+ self.log('Truststore (%s) created', self.name)
+ self.log(result)
+ self.changed = True
+
+ def delete_truststore(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmtruststore {0}'.format(self.name)
+ self.log('Command to be executed: %s', cmd)
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
+ result = stdout.read().decode('utf-8')
+ rc = stdout.channel.recv_exit_status()
+
+ if rc > 0:
+ self.log("Error in executing command: %s", cmd)
+ self.raise_error(stderr)
+ else:
+ self.log('Truststore (%s) deleted', self.name)
+ self.log(result)
+ self.changed = True
+
+ def apply(self):
+ if self.is_truststore_exists():
+ self.log("Truststore (%s) exists", self.name)
+ if self.state == 'present':
+ self.msg = 'Truststore ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_truststore()
+ self.msg = 'Truststore ({0}) deleted.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Truststore ({0}) does not exist. No modifications done.'.format(self.name)
+ else:
+ self.download_file()
+ self.create_truststore()
+ self.msg = 'Truststore ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVTrustStore()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_restore_cloud_backup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_restore_cloud_backup.py
new file mode 100644
index 000000000..7e46f4c72
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_restore_cloud_backup.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_restore_cloud_backup
+short_description: This module restores the cloud backup on IBM Storage Virtualize family systems
+version_added: '1.11.0'
+description:
+ - Ansible interface to manage restorevolume command.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ target_volume_name:
+ description:
+ - Specifies the volume name to restore onto.
+ type: str
+ required: true
+ source_volume_uid:
+ description:
+ - Specifies the volume snapshot to restore (specified by volume UID).
+ - This parameter is required to restore a backup from a different volume.
+ - Specified UID must be different from the UID of the volume being restored.
+ type: str
+ generation:
+ description:
+ - Specifies the snapshot generation to restore. The value must be a number.
+ type: int
+ restoreuid:
+ description:
+ - Specifies that the UID of the restored volume should be set to the UID
+ of the volume snapshot that is being restored.
+ - This parameter can be used only with I(source_volume_uid).
+ - The I(restoreuid) parameter is not supported if the cloud account is in import mode.
+ type: bool
+ deletelatergenerations:
+ description:
+ - Specifies that all backup generations should be deleted after the generation is restored.
+ type: bool
+ cancel:
+ description:
+ - Specifies to cancel the restore operation.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Restore cloud backup
+ ibm.storage_virtualize.ibm_sv_restore_cloud_backup:
+ clustername: "{{cluster_A}}"
+ username: "{{username_A}}"
+ password: "{{password_A}}"
+ target_volume_name: vol1
+ source_volume_uid: 6005076400B70038E00000000000001C
+ generation: 1
+- name: Restore cloud backup to different cluster
+ ibm.storage_virtualize.ibm_sv_restore_cloud_backup:
+ clustername: "{{cluster_B}}"
+ username: "{{username_B}}"
+ password: "{{password_B}}"
+ target_volume_name: vol2
+ source_volume_uid: 6005076400B70038E00000000000001C
+ generation: 1
+- name: Cancel restore operation
+ ibm.storage_virtualize.ibm_sv_restore_cloud_backup:
+ clustername: "{{cluster_A}}"
+ username: "{{username_A}}"
+ password: "{{password_A}}"
+ target_volume_name: vol1
+ cancel: true
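+# A hedged sketch: restore a specific generation, keep the snapshot UID on the
+# restored volume, and prune newer generations; values are illustrative.
+- name: Restore cloud backup and delete later generations
+ ibm.storage_virtualize.ibm_sv_restore_cloud_backup:
+ clustername: "{{cluster_A}}"
+ username: "{{username_A}}"
+ password: "{{password_A}}"
+ target_volume_name: vol1
+ source_volume_uid: 6005076400B70038E00000000000001C
+ generation: 2
+ restoreuid: true
+ deletelatergenerations: true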
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVRestoreCloudBackup:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ target_volume_name=dict(
+ type='str',
+ required=True
+ ),
+ source_volume_uid=dict(
+ type='str'
+ ),
+ generation=dict(
+ type='int',
+ ),
+ restoreuid=dict(
+ type='bool'
+ ),
+ deletelatergenerations=dict(
+ type='bool'
+ ),
+ cancel=dict(
+ type='bool'
+ ),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.target_volume_name = self.module.params.get('target_volume_name', '')
+ self.source_volume_uid = self.module.params.get('source_volume_uid', '')
+ self.generation = self.module.params.get('generation', '')
+ self.restoreuid = self.module.params.get('restoreuid', '')
+ self.deletelatergenerations = self.module.params.get('deletelatergenerations', False)
+ self.cancel = self.module.params.get('cancel', False)
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.target_volume_name:
+ self.module.fail_json(msg='Missing mandatory parameter: target_volume_name')
+
+ if self.cancel:
+ invalids = ('source_volume_uid', 'generation', 'restoreuid', 'deletelatergenerations')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Parameters not supported during restore cancellation: {0}'.format(invalid_exists)
+ )
+
+ def validate(self):
+ if not self.cancel:
+ cmd = 'lsvolumebackupgeneration'
+ cmdargs = None
+ cmdopts = {}
+
+ if self.source_volume_uid:
+ cmdopts['uid'] = self.source_volume_uid
+ else:
+ cmdopts['volume'] = self.target_volume_name
+
+ result = self.restapi.svc_obj_info(cmd=cmd, cmdopts=cmdopts, cmdargs=cmdargs)
+ else:
+ result = True
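+ # For a cancel request there is no backup listing to validate; the lsvdisk
+ # restore_status check below verifies a restore is actually in progress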
+ cmd = 'lsvdisk'
+ vdata = {}
+ data = self.restapi.svc_obj_info(cmd=cmd, cmdopts=None, cmdargs=[self.target_volume_name])
+
+ if isinstance(data, list):
+ for d in data:
+ vdata.update(d)
+ else:
+ vdata = data
+
+ if vdata and self.cancel and vdata['restore_status'] in {'none', 'available'}:
+ self.module.exit_json(
+ msg='No restore operation is in progress for the volume ({0}).'.format(self.target_volume_name)
+ )
+
+ return result
+
+ def restore_volume(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'restorevolume'
+ cmdargs = [self.target_volume_name]
+ cmdopts = {}
+
+ if self.cancel:
+ cmdopts['cancel'] = self.cancel
+ self.msg = 'Restore operation on volume ({0}) cancelled.'.format(self.target_volume_name)
+ else:
+ if self.source_volume_uid:
+ cmdopts['fromuid'] = self.source_volume_uid
+
+ if self.generation:
+ cmdopts['generation'] = self.generation
+
+ if self.restoreuid:
+ cmdopts['restoreuid'] = self.restoreuid
+
+ if self.deletelatergenerations:
+ cmdopts['deletelatergenerations'] = self.deletelatergenerations
+
+ self.msg = 'Restore operation on volume ({0}) started.'.format(self.target_volume_name)
+
+ response = self.restapi._svc_token_wrap(cmd, cmdopts, cmdargs=cmdargs)
+ self.log('response=%s', response)
+ self.changed = True
+
+ if response['out']:
+ if b'CMMVC9103E' in response['out']:
+ self.msg = 'CMMVC9103E: Volume ({0}) is not ready to perform any operation right now.'.format(
+ self.target_volume_name
+ )
+ self.changed = False
+ elif b'CMMVC9099E' in response['out']:
+ self.msg = 'No restore operation is in progress for the volume ({0}).'.format(self.target_volume_name)
+ self.changed = False
+ else:
+ self.module.fail_json(msg=response)
+
+ def apply(self):
+ if self.validate():
+ self.restore_volume()
+ self.log(self.msg)
+ else:
+ self.msg = 'No backup exists for the given source UID/volume.'
+ self.log(self.msg)
+ self.module.fail_json(msg=self.msg)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+ self.log(self.msg)
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVRestoreCloudBackup()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_switch_replication_direction.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_switch_replication_direction.py
new file mode 100644
index 000000000..2d5e88a68
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_sv_switch_replication_direction.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sv_switch_replication_direction
+short_description: This module switches the replication direction on IBM Storage Virtualize family systems
+version_added: '1.10.0'
+description:
+ - Ansible interface to manage the chvolumegroupreplication command.
+ - This module can be used to switch replication direction.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when the hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ name:
+ description:
+ - Specifies the name of the volume group.
+ type: str
+ required: true
+ mode:
+ description:
+ - Specifies the replication mode of the volume group.
+ choices: [ independent, production ]
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Shilpi Jain(@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Switch to independent mode
+ ibm.storage_virtualize.ibm_sv_switch_replication_direction:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ mode: independent
+ name: vg0
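+# A hedged sketch: switching the same volume group back to production mode;
+# the volume group name is an illustrative placeholder.
+- name: Switch to production mode
+ ibm.storage_virtualize.ibm_sv_switch_replication_direction:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ mode: production
+ name: vg0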
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVSwitchReplication:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ mode=dict(
+ type='str',
+ choices=['independent', 'production'],
+ required=True
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.mode = self.module.params['mode']
+
+ self.basic_checks()
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+
+ # Dynamic variables
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(
+ msg='Missing mandatory parameter: name'
+ )
+
+ # function to check whether volume group exists or not
+ def get_volumegroup_info(self):
+ return self.restapi.svc_obj_info(
+ 'lsvolumegroup', None, [self.name]
+ )
+
+ def change_vg_mode(self):
+ cmd = 'chvolumegroupreplication'
+ cmdopts = {}
+ cmdopts["mode"] = self.mode
+ self.log("Changing replicaiton direction.. Command %s opts %s", cmd, cmdopts)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ def apply(self):
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+ else:
+ if self.get_volumegroup_info():
+ self.change_vg_mode()
+ self.changed = True
+ self.msg = "Replication direction on volume group [%s] has been modified." % self.name
+ else:
+ self.module.fail_json(msg="Volume group does not exist: [%s]" % self.name)
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVSwitchReplication()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_auth.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_auth.py
new file mode 100644
index 000000000..e5448097a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_auth.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_auth
+short_description: This module generates an authentication token for a user on IBM Storage Virtualize family system
+description:
+ - Ansible interface to generate the authentication token.
+ The token is used to make REST API calls to the storage system.
+version_added: "1.5.0"
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - This parameter is required in this module to generate the token.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - This parameter is required in this module to generate the token.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - This field is not required for the ibm_svc_auth module.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Shilpi Jain(@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Obtain an authentication token
+ register: result
+ ibm.storage_virtualize.ibm_svc_auth:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+- name: Create a volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ token: "{{result.token}}"
+ name: volume0
+ state: present
+ pool: Pool0
+ size: "4294967296"
+ unit: b
+'''
+
+RETURN = '''
+token:
+ description: Authentication token for a user.
+ returned: success
+ type: str
+ version_added: 1.5.0
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCauth(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=None
+ )
+
+
+def main():
+ v = IBMSVCauth()
+ try:
+ if v.restapi.token is not None:
+ msg = "Authentication token generated"
+ v.module.exit_json(msg=msg, token=v.restapi.token)
+ else:
+ msg = "Authentication token is not generated"
+ v.module.fail_json(msg=msg, token=v.restapi.token)
+ except Exception as e:
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_complete_initial_setup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_complete_initial_setup.py
new file mode 100644
index 000000000..e7b3b0628
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_complete_initial_setup.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_complete_initial_setup
+short_description: This module completes the initial setup configuration for LMC systems
+description:
+ - It disables the GUI setup wizard for LMC systems.
+ - It is recommended to run this module after using the ibm_svc_initial_setup module for initial setup configuration.
+ - This module works on SSH. Paramiko must be installed to use this module.
+version_added: "1.8.0"
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ username:
+ description:
+ - Username for the Storage Virtualize system.
+ type: str
+ required: true
+ password:
+ description:
+ - Password for the Storage Virtualize system.
+ type: str
+ required: true
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Shilpi Jain(@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Complete initial setup
+ ibm.storage_virtualize.ibm_svc_complete_initial_setup:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+'''
+
+RETURN = '''# '''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import svc_ssh_argument_spec, get_logger
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
+
+
+class IBMSVCCompleteSetup(object):
+ def __init__(self):
+ argument_spec = svc_ssh_argument_spec()
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ self.ssh_client = IBMSVCssh(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ look_for_keys=None,
+ key_filename=None,
+ log_path=log_path
+ )
+
+ def is_lmc(self):
+ info_output = ""
+
+ cmd = 'svcinfo lsguicapabilities'
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
+
+ for line in stdout.readlines():
+ info_output += line
+ if 'login_eula yes' in info_output:
+ self.log("The system is non LMC")
+ return False
+ else:
+ self.log("The system is LMC")
+ return True
+
+ def disable_setup_wizard(self):
+ self.log("Disable setup wizard")
+
+ cmd = 'chsystem -easysetup no'
+
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
+
+ def apply(self):
+ changed = False
+ is_lmc = False
+ msg = ""
+
+ if self.module.check_mode:
+ msg = "skipping changes due to check mode"
+ else:
+ if not self.ssh_client.is_client_connected:
+ self.module.fail_json(msg="SSH client not connected")
+
+ is_lmc = self.is_lmc()
+ if is_lmc:
+ self.disable_setup_wizard()
+ changed = True
+ msg += "Initial Setup configuration completed. Setup wizard is disabled."
+ self.ssh_client._svc_disconnect()
+ self.module.exit_json(msg=msg, changed=changed)
+ else:
+ msg += "This is a non LMC system. Please log in GUI to accept EULA. "
+ msg += "More details are available in README (https://github.com/ansible-collections/ibm.storage_virtualize)."
+ self.ssh_client._svc_disconnect()
+ self.module.fail_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCCompleteSetup()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_host.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_host.py
new file mode 100644
index 000000000..8a0e6da64
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_host.py
@@ -0,0 +1,766 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+# Rohit Kumar <rohit.kumar6@ibm.com>
+# Sudheesh Reddy Satti<Sudheesh.Reddy.Satti@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_host
+short_description: This module manages hosts on IBM Storage Virtualize family systems
+version_added: "1.0.0"
+description:
+ - Ansible interface to manage 'mkhost', 'chhost', and 'rmhost' host commands.
+options:
+ name:
+ description:
+ - Specifies a name or label for the new host object.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a host.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ version_added: '1.5.0'
+ fcwwpn:
+ description:
+ - List of Initiator WWPNs to be added to the host. The complete list of WWPNs must be provided.
+ - The parameters I(fcwwpn) and I(iscsiname) are mutually exclusive.
+ - Required when I(state=present), to create or modify a Fibre Channel (FC) host.
+ type: str
+ iscsiname:
+ description:
+ - List of Initiator IQNs to be added to the host. Each IQN is separated by a comma. The complete list of IQNs must be provided.
+ - The parameters I(fcwwpn) and I(iscsiname) are mutually exclusive.
+ - Valid when I(state=present), to create a host.
+ type: str
+ iogrp:
+ description:
+ - Specifies a set of one or more input/output (I/O) groups from which the host can access the volumes.
+ Once specified, this parameter cannot be modified.
+ - Valid when I(state=present), to create a host.
+ type: str
+ nqn:
+ description:
+ - List of initiator NQNs to be added to the host. Each NQN is separated by a comma. The complete list of NQNs must be provided.
+ - Required when I(protocol=rdmanvme or tcpnvme), to create.
+ - Valid when I(state=present), to create or modify a host.
+ type: str
+ version_added: '1.12.0'
+ protocol:
+ description:
+ - Specifies the protocol used by the host to communicate with the storage system. Supported protocols are C(scsi), C(rdmanvme), and C(tcpnvme).
+ - Valid when I(state=present), to create a host.
+ choices: [scsi, rdmanvme, tcpnvme]
+ type: str
+ type:
+ description:
+ - Specifies the type of host.
+ - Valid when I(state=present), to create or modify a host.
+ type: str
+ site:
+ description:
+ - Specifies the site name of the host.
+ - Valid when I(state=present), to create or modify a host.
+ type: str
+ hostcluster:
+ description:
+ - Specifies the name of the host cluster to which the host object is to be added.
+ A host cluster must exist before a host object can be added to it.
+ - Parameters I(hostcluster) and I(nohostcluster) are mutually exclusive.
+ - Valid when I(state=present), to create or modify a host.
+ type: str
+ version_added: '1.5.0'
+ nohostcluster:
+ description:
+ - If specified as C(True), the host object is removed from the host cluster.
+ - Parameters I(hostcluster) and I(nohostcluster) are mutually exclusive.
+ - Valid when I(state=present), to modify an existing host.
+ type: bool
+ version_added: '1.5.0'
+ old_name:
+ description:
+ - Specifies the old name of the host while renaming.
+ - Valid when I(state=present), to rename an existing host.
+ type: str
+ version_added: '1.9.0'
+ portset:
+ description:
+ - Specifies the portset to be associated with the host.
+ - Valid when I(state=present), to create or modify a host.
+ type: str
+ version_added: '1.12.0'
+ partition:
+ description:
+ - Specifies the storage partition to be associated with the host.
+ - Valid when I(state=present), to create or modify a host.
+ - Supported from Storage Virtualize family systems 8.6.1.0 or later.
+ type: str
+ version_added: '2.1.0'
+ nopartition:
+ description:
+ - If specified as C(True), the host object is removed from the storage partition.
+ - Parameters I(partition) and I(nopartition) are mutually exclusive.
+ - Valid when I(state=present), to modify an existing host.
+ - Supported from Storage Virtualize family systems 8.6.1.0 or later.
+ type: bool
+ version_added: '2.1.0'
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sreshtant Bohidar (@Sreshtant-Bohidar)
+ - Rohit Kumar (@rohitk-github)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Define a new iSCSI host
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: host4test
+ state: present
+ iscsiname: iqn.1994-05.com.redhat:2e358e438b8a
+ iogrp: 0:1:2:3
+ protocol: scsi
+ type: generic
+ site: site-name
+ portset: portset0
+- name: Add a host to an existing host cluster
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: host4test
+ state: present
+ hostcluster: hostcluster0
+- name: Define a new FC host
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: host4test
+ state: present
+ fcwwpn: 100000109B570216:1000001AA0570266
+ iogrp: 0:1:2:3
+ protocol: scsi
+ type: generic
+ site: site-name
+- name: Rename an existing host
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ old_name: "host4test"
+ name: "new_host_name"
+ state: "present"
+- name: Create an iSCSI host
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: host_name
+ iscsiname: iqn.1994-05.com.redhat:2e358e438b8a,iqn.localhost.hostid.7f000001
+ state: present
+- name: Create a tcpnvme host
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: host_name
+ protocol: tcpnvme
+ nqn: nqn.2014-08.org.nvmexpress:NVMf:uuid:644f51bf-8432-4f59-bb13-5ada20c06397
+ state: present
+- name: Delete a host
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: new_host_name
+ state: absent
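+# A hedged sketch: associating an existing host with a storage partition
+# (supported from 8.6.1.0); names are illustrative placeholders.
+- name: Associate a host with a storage partition
+ ibm.storage_virtualize.ibm_svc_host:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: host4test
+ partition: partition1
+ state: present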
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVChost(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ fcwwpn=dict(type='str', required=False),
+ iscsiname=dict(type='str', required=False),
+ iogrp=dict(type='str', required=False),
+ protocol=dict(type='str', required=False, choices=['scsi',
+ 'rdmanvme', 'tcpnvme']),
+ type=dict(type='str'),
+ site=dict(type='str'),
+ hostcluster=dict(type='str'),
+ nohostcluster=dict(type='bool'),
+ old_name=dict(type='str', required=False),
+ nqn=dict(type='str', required=False),
+ portset=dict(type='str', required=False),
+ partition=dict(type='str', required=False),
+ nopartition=dict(type='bool', required=False)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.fcwwpn = self.module.params.get('fcwwpn', '')
+ self.iscsiname = self.module.params.get('iscsiname', '')
+ self.iogrp = self.module.params.get('iogrp', '')
+ self.protocol = self.module.params.get('protocol', '')
+ self.type = self.module.params.get('type', '')
+ self.site = self.module.params.get('site', '')
+ self.hostcluster = self.module.params.get('hostcluster', '')
+ self.nohostcluster = self.module.params.get('nohostcluster', '')
+ self.old_name = self.module.params.get('old_name', '')
+ self.nqn = self.module.params.get('nqn', '')
+ self.portset = self.module.params.get('portset', '')
+ self.partition = self.module.params.get('partition', '')
+ self.nopartition = self.module.params.get('nopartition', '')
+
+ self.basic_checks()
+
+ # internal variable
+ self.changed = False
+
+ # Handling duplicate fcwwpn
+ if self.fcwwpn:
+ dup_fcwwpn = self.duplicate_checker(self.fcwwpn.split(':'))
+ if dup_fcwwpn:
+ self.module.fail_json(msg='The parameter {0} has been entered multiple times. Enter the parameter only one time.'.format(dup_fcwwpn))
+
+ # Handling duplicate iscsiname
+ if self.iscsiname:
+ dup_iscsiname = self.duplicate_checker(self.iscsiname.split(','))
+ if dup_iscsiname:
+ self.module.fail_json(msg='The parameter {0} has been entered multiple times. Enter the parameter only one time.'.format(dup_iscsiname))
+
+ # Handling duplicate nqn
+ if self.nqn:
+ dup_nqn = self.duplicate_checker(self.nqn.split(','))
+ if dup_nqn:
+ self.module.fail_json(
+ msg='The parameter {0} has been entered multiple times. Enter the parameter only one time.'.format(
+ dup_nqn))
+
+ # Handling for missing mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+ # Handling for parameter protocol
+ if self.protocol:
+ if self.protocol not in ('scsi', 'rdmanvme', 'tcpnvme'):
+ self.module.fail_json(msg="[{0}] is not supported for iscsiname. only 'scsi', 'rdmanvme' and 'tcpnvme' "
+ "protocols are supported.".format(self.protocol))
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ if self.partition and self.nopartition:
+ self.module.fail_json(msg='Mutually exclusive parameters: partition, nopartition')
+ if self.state == 'absent':
+ fields = [f for f in ['protocol', 'portset', 'nqn', 'type', 'partition', 'nopartition'] if getattr(self, f)]
+
+ if any(fields):
+ self.module.fail_json(msg='Parameters {0} not supported while deleting a host'.format(', '.join(fields)))
+
+ # for validating parameter while renaming a volume
+ def parameter_handling_while_renaming(self):
+ parameters = {
+ "fcwwpn": self.fcwwpn,
+ "iscsiname": self.iscsiname,
+ "iogrp": self.iogrp,
+ "protocol": self.protocol,
+ "type": self.type,
+ "site": self.site,
+ "hostcluster": self.hostcluster,
+ "nohostcluster": self.nohostcluster,
+ "partition": self.partition,
+ "nopartition": self.nopartition
+ }
+ parameters_exists = [parameter for parameter, value in parameters.items() if value]
+ if parameters_exists:
+ self.module.fail_json(msg="Parameters {0} not supported while renaming a host.".format(parameters_exists))
+
+ def duplicate_checker(self, items):
+ unique_items = set(items)
+ if len(items) != len(unique_items):
+ return [element for element in unique_items if items.count(element) > 1]
+ else:
+ return []
+
+ def get_existing_host(self, host_name):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lshost', cmdopts=None,
+ cmdargs=[host_name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ # TBD: Implement a more generic way to check for properties to modify.
+ def host_probe(self, data):
+ props = []
+ if self.hostcluster and self.nohostcluster:
+ self.module.fail_json(msg="You must not pass in both hostcluster and "
+ "nohostcluster to the module.")
+
+ if self.partition and self.nopartition:
+ self.module.fail_json(msg="You must not pass in both partition and "
+ "nopartition to the module.")
+
+ if self.hostcluster and (self.hostcluster != data['host_cluster_name']):
+ if data['host_cluster_name'] != '':
+ self.module.fail_json(msg="Host already belongs to hostcluster [%s]" % data['host_cluster_name'])
+ else:
+ props += ['hostcluster']
+
+        # TBD: The parameter is fcwwpn but the view exposes the WWPN label.
+ if self.type:
+ if self.type != data['type']:
+ props += ['type']
+
+ if self.fcwwpn:
+ self.existing_fcwwpn = [node["WWPN"] for node in data['nodes'] if "WWPN" in node]
+ self.input_fcwwpn = self.fcwwpn.upper().split(":")
+ if set(self.existing_fcwwpn).symmetric_difference(set(self.input_fcwwpn)):
+ props += ['fcwwpn']
+
+ if self.iscsiname:
+ self.existing_iscsiname = [node["iscsi_name"] for node in data['nodes'] if "iscsi_name" in node]
+ self.input_iscsiname = self.iscsiname.split(",")
+ if set(self.existing_iscsiname).symmetric_difference(set(self.input_iscsiname)):
+ props += ['iscsiname']
+
+ if self.nqn:
+ self.existing_nqn = [node["nqn"] for node in data['nodes'] if "nqn" in node]
+ self.input_nqn = self.nqn.split(",")
+ if set(self.existing_nqn).symmetric_difference(set(self.input_nqn)):
+ props += ['nqn']
+
+ if self.site:
+ if self.site != data['site_name']:
+ props += ['site']
+
+ if self.nohostcluster:
+ if data['host_cluster_name'] != '':
+ props += ['nohostcluster']
+
+ if self.portset:
+ if self.portset != data['portset_name']:
+ props += ['portset']
+
+ if self.partition and self.partition != data['partition_name']:
+ if data['partition_name'] != '':
+ self.module.fail_json(msg="Host already belongs to partition [%s]" % data['partition_name'])
+ else:
+ props += ['partition']
+
+ if self.nopartition:
+ if data['partition_name'] != '':
+ props += ['nopartition']
+
+ self.log("host_probe props='%s'", props)
+ return props
+
+ def host_create(self):
+ if (not self.fcwwpn) and (not self.iscsiname) and (not self.nqn):
+ self.module.fail_json(msg="You must pass in fcwwpn or iscsiname or nqn "
+ "to the module.")
+
+ if (self.fcwwpn and self.iscsiname) or (self.nqn and self.iscsiname) or (self.fcwwpn and self.nqn):
+ self.module.fail_json(msg="You have to pass only one parameter among fcwwpn, nqn and "
+ "iscsiname to the module.")
+
+ if self.hostcluster and self.nohostcluster:
+ self.module.fail_json(msg="You must not pass in both hostcluster and "
+ "nohostcluster to the module.")
+
+ if self.hostcluster and self.partition:
+ self.module.fail_json(msg='You must not pass in both hostcluster and partition to the module.')
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("creating host '%s'", self.name)
+
+ # Make command
+ cmd = 'mkhost'
+ cmdopts = {'name': self.name, 'force': True}
+ if self.fcwwpn:
+ cmdopts['fcwwpn'] = self.fcwwpn
+ elif self.iscsiname:
+ cmdopts['iscsiname'] = self.iscsiname
+ else:
+ cmdopts['nqn'] = self.nqn
+
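+        # Protocol defaults to 'scsi' when the user does not supply one.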
+ cmdopts['protocol'] = self.protocol if self.protocol else 'scsi'
+ if self.iogrp:
+ cmdopts['iogrp'] = self.iogrp
+ if self.type:
+ cmdopts['type'] = self.type
+ if self.site:
+ cmdopts['site'] = self.site
+ if self.portset:
+ cmdopts['portset'] = self.portset
+ if self.partition:
+ cmdopts['partition'] = self.partition
+
+ self.log("creating host command '%s' opts '%s'",
+ self.fcwwpn, self.type)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create host result '%s'", result)
+
+ if result and 'message' in result:
+ self.changed = True
+ self.log("create host result message '%s'", (result['message']))
+ else:
+ self.module.fail_json(
+ msg="Failed to create host [%s]" % self.name)
+
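+    # The *_update helpers below reconcile port lists declaratively: entries
+    # present on the system but missing from the input are removed with
+    # 'rmhostport', entries in the input but not on the system are added with
+    # 'addhostport', and entries present in both are left untouched.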
+ def host_fcwwpn_update(self):
+ to_be_removed = ':'.join(list(set(self.existing_fcwwpn) - set(self.input_fcwwpn)))
+ if to_be_removed:
+ self.restapi.svc_run_command(
+ 'rmhostport',
+ {'fcwwpn': to_be_removed, 'force': True},
+ [self.name]
+ )
+ self.log('%s removed from %s', to_be_removed, self.name)
+ to_be_added = ':'.join(list(set(self.input_fcwwpn) - set(self.existing_fcwwpn)))
+ if to_be_added:
+ self.restapi.svc_run_command(
+ 'addhostport',
+ {'fcwwpn': to_be_added, 'force': True},
+ [self.name]
+ )
+ self.log('%s added to %s', to_be_added, self.name)
+
+ def host_iscsiname_update(self):
+ to_be_removed = ','.join(list(set(self.existing_iscsiname) - set(self.input_iscsiname)))
+ if to_be_removed:
+ self.restapi.svc_run_command(
+ 'rmhostport',
+ {'iscsiname': to_be_removed, 'force': True},
+ [self.name]
+ )
+ self.log('%s removed from %s', to_be_removed, self.name)
+ to_be_added = ','.join(list(set(self.input_iscsiname) - set(self.existing_iscsiname)))
+ if to_be_added:
+ self.restapi.svc_run_command(
+ 'addhostport',
+ {'iscsiname': to_be_added, 'force': True},
+ [self.name]
+ )
+ self.log('%s added to %s', to_be_added, self.name)
+
+ def host_nqn_update(self):
+ to_be_removed = ','.join(list(set(self.existing_nqn) - set(self.input_nqn)))
+ if to_be_removed:
+ self.restapi.svc_run_command(
+ 'rmhostport',
+ {'nqn': to_be_removed, 'force': True},
+ [self.name]
+ )
+ self.log('%s removed from %s', to_be_removed, self.name)
+ to_be_added = ','.join(list(set(self.input_nqn) - set(self.existing_nqn)))
+ if to_be_added:
+ self.restapi.svc_run_command(
+ 'addhostport',
+ {'nqn': to_be_added, 'force': True},
+ [self.name]
+ )
+ self.log('%s added to %s', to_be_added, self.name)
+
+ def host_update(self, modify, host_data):
+ # update the host
+ self.log("updating host '%s'", self.name)
+ if 'hostcluster' in modify:
+ self.addhostcluster()
+ elif 'nohostcluster' in modify:
+ self.removehostcluster(host_data)
+
+ cmd = 'chhost'
+ cmdopts = {}
+ if 'fcwwpn' in modify:
+ self.host_fcwwpn_update()
+ self.changed = True
+ self.log("fcwwpn of %s updated", self.name)
+ if 'iscsiname' in modify:
+ self.host_iscsiname_update()
+ self.changed = True
+ self.log("iscsiname of %s updated", self.name)
+ if 'nqn' in modify:
+ self.host_nqn_update()
+ self.changed = True
+ self.log("nqn of %s updated", self.name)
+ if 'type' in modify:
+ cmdopts['type'] = self.type
+ if 'site' in modify:
+ cmdopts['site'] = self.site
+ if 'portset' in modify:
+ cmdopts['portset'] = self.portset
+ if 'partition' in modify:
+ cmdopts['partition'] = self.partition
+ if 'nopartition' in modify:
+ cmdopts['nopartition'] = self.nopartition
+ if cmdopts:
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ # Any error will have been raised in svc_run_command
+ # chhost does not output anything when successful.
+ self.changed = True
+ self.log("type of %s updated", self.name)
+
+ def host_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting host '%s'", self.name)
+
+ cmd = 'rmhost'
+ cmdopts = {}
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # rmhost does not output anything when successful.
+ self.changed = True
+
+ def get_existing_hostcluster(self):
+ self.log("get_existing_hostcluster %s", self.hostcluster)
+
+ data = self.restapi.svc_obj_info(cmd='lshostcluster', cmdopts=None,
+ cmdargs=[self.hostcluster])
+
+ return data
+
+ def addhostcluster(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("Adding host '%s' in hostcluster %s", self.name, self.hostcluster)
+
+ cmd = 'addhostclustermember'
+ cmdopts = {}
+ cmdargs = [self.hostcluster]
+
+ cmdopts['host'] = self.name
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # addhostclustermember does not output anything when successful.
+ self.changed = True
+
+ def removehostcluster(self, data):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("removing host '%s' from hostcluster %s", self.name, data['host_cluster_name'])
+
+ hostcluster_name = data['host_cluster_name']
+
+ cmd = 'rmhostclustermember'
+ cmdopts = {}
+ cmdargs = [hostcluster_name]
+
+ cmdopts['host'] = self.name
+ cmdopts['keepmappings'] = True
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # rmhostclustermember does not output anything when successful.
+ self.changed = True
+
+ # function for renaming an existing host with a new name
+ def host_rename(self, host_data):
+ msg = ''
+ self.parameter_handling_while_renaming()
+ old_host_data = self.get_existing_host(self.old_name)
+ if not old_host_data and not host_data:
+ self.module.fail_json(msg="Host [{0}] does not exists.".format(self.old_name))
+ elif old_host_data and host_data:
+ self.module.fail_json(msg="Host [{0}] already exists.".format(self.name))
+ elif not old_host_data and host_data:
+ msg = "Host with name [{0}] already exists.".format(self.name)
+ elif old_host_data and not host_data:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chhost', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "Host [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = []
+
+ host_data = self.get_existing_host(self.name)
+
+ if self.state == 'present' and self.old_name:
+ msg = self.host_rename(host_data)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ if host_data:
+ if self.state == 'absent':
+ self.log("CHANGED: host exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # This is where we detect if chhost should be called
+ modify = self.host_probe(host_data)
+ if modify:
+ changed = True
+ else:
+ if self.state == 'present':
+ self.log("CHANGED: host does not exist, but requested state is 'present'")
+ changed = True
+
+ if changed:
+ if self.state == 'present':
+ if self.hostcluster:
+ hc_data = self.get_existing_hostcluster()
+ if hc_data is None:
+ self.module.fail_json(msg="Host cluster must already exist before its usage in this module")
+ elif not host_data and hc_data:
+ self.host_create()
+ self.addhostcluster()
+ msg = "host %s has been created and added to hostcluster." % self.name
+ elif not host_data:
+ self.host_create()
+ msg = "host %s has been created." % self.name
+ if host_data and modify:
+ # This is where we would modify
+ self.host_update(modify, host_data)
+ msg = "host [%s] has been modified." % self.name
+ elif self.state == 'absent':
+ self.host_delete()
+ msg = "host [%s] has been deleted." % self.name
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "host [%s] did not exist." % self.name
+ else:
+ msg = "host [%s] already exists." % self.name
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode'
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVChost()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_hostcluster.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_hostcluster.py
new file mode 100644
index 000000000..c1df1a847
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_hostcluster.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_hostcluster
+short_description: This module manages host cluster on IBM Storage Virtualize family systems
+version_added: "1.5.0"
+description:
+  - Ansible interface to manage 'mkhostcluster', 'chhostcluster' and 'rmhostcluster' host cluster commands.
+options:
+ name:
+ description:
+ - Specifies a name or label for the new host cluster object.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) a host cluster.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ ownershipgroup:
+ description:
+ - The name of the ownership group to which the host cluster object is being added.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ - Applies when I(state=present).
+ type: str
+ version_added: '1.6.0'
+ noownershipgroup:
+ description:
+ - If specified True, the host cluster object is removed from the ownership group to which it belongs.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ - Applies when I(state=present) to modify an existing hostcluster.
+ type: bool
+ version_added: '1.6.0'
+ removeallhosts:
+ description:
+ - Specifies that all hosts in the host cluster and the associated host cluster object be deleted.
+ - Applies when I(state=absent).
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Shilpi Jain (@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Define a new host cluster
+ ibm.storage_virtualize.ibm_svc_hostcluster:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: hostcluster0
+ state: present
+ ownershipgroup: group1
+- name: Update the ownershipgroup of a host cluster
+ ibm.storage_virtualize.ibm_svc_hostcluster:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: hostcluster0
+ state: present
+ noownershipgroup: True
+- name: Delete a host cluster
+ ibm.storage_virtualize.ibm_svc_hostcluster:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: hostcluster0
+ state: absent
+ removeallhosts: True
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVChostcluster(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ ownershipgroup=dict(type='str'),
+ noownershipgroup=dict(type='bool'),
+ removeallhosts=dict(type='bool')
+ )
+ )
+
+ self.changed = ""
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.ownershipgroup = self.module.params.get('ownershipgroup', '')
+ self.noownershipgroup = self.module.params.get('noownershipgroup', '')
+ self.removeallhosts = self.module.params.get('removeallhosts', '')
+
+ # Handling missing mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_hostcluster(self):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lshostcluster', cmdopts=None,
+ cmdargs=[self.name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def hostcluster_probe(self, data):
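+        # Returns the list of properties that require a chhostcluster call.
+        # Example (assumed): data {'owner_name': 'group1'} together with
+        # noownershipgroup=True yields ['noownershipgroup'].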
+ props = []
+ if self.removeallhosts:
+ self.module.fail_json(msg="Parameter 'removeallhosts' can be used only while deleting hostcluster")
+
+ if self.ownershipgroup and self.noownershipgroup:
+ self.module.fail_json(msg="You must not pass in both 'ownershipgroup' and "
+ "'noownershipgroup' to the module.")
+
+ if data['owner_name'] and self.noownershipgroup:
+ props += ['noownershipgroup']
+
+ if self.ownershipgroup and (not data['owner_name'] or self.ownershipgroup != data['owner_name']):
+ props += ['ownershipgroup']
+
+        if not props:
+            props = None
+
+        self.log("hostcluster_probe props='%s'", props)
+ return props
+
+ def hostcluster_create(self):
+ if self.removeallhosts:
+ self.module.fail_json(msg="Parameter 'removeallhosts' cannot be passed while creating hostcluster")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ # Make command
+ cmd = 'mkhostcluster'
+ cmdopts = {'name': self.name}
+
+ if self.ownershipgroup:
+ cmdopts['ownershipgroup'] = self.ownershipgroup
+
+ self.log("creating host cluster command opts '%s'",
+ self.ownershipgroup)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create host cluster result '%s'", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create host cluster result message '%s'", (result['message']))
+ else:
+ self.module.fail_json(
+ msg="Failed to create host cluster [%s]" % self.name)
+
+ def hostcluster_update(self, modify):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("updating host cluster '%s'", self.name)
+ cmd = 'chhostcluster'
+ cmdopts = {}
+ if 'ownershipgroup' in modify:
+ cmdopts['ownershipgroup'] = self.ownershipgroup
+ elif 'noownershipgroup' in modify:
+ cmdopts['noownershipgroup'] = self.noownershipgroup
+
+ if cmdopts:
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ # Any error will have been raised in svc_run_command
+            # chhostcluster does not output anything when successful.
+ self.changed = True
+ self.log("Properties of %s updated", self.name)
+
+ def hostcluster_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting host cluster '%s'", self.name)
+
+ cmd = 'rmhostcluster'
+ cmdopts = {}
+ cmdargs = [self.name]
+
+ if self.removeallhosts:
+ cmdopts = {'force': True}
+ cmdopts['removeallhosts'] = self.removeallhosts
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # rmhostcluster does not output anything when successful.
+ self.changed = True
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = []
+
+ hc_data = self.get_existing_hostcluster()
+
+ if hc_data:
+ if self.state == 'absent':
+ self.log("CHANGED: host cluster exists, but requested "
+ "state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # This is where we detect if chhostcluster should be called
+ modify = self.hostcluster_probe(hc_data)
+ if modify:
+ changed = True
+ else:
+ if self.state == 'present':
+ self.log("CHANGED: host cluster does not exist, "
+ "but requested state is 'present'")
+ changed = True
+
+ if changed:
+ if self.state == 'present':
+ if not hc_data:
+ self.hostcluster_create()
+ msg = "host cluster %s has been created." % self.name
+ else:
+ # This is where we would modify
+ self.hostcluster_update(modify)
+ msg = "host cluster [%s] has been modified." % self.name
+ elif self.state == 'absent':
+ self.hostcluster_delete()
+ msg = "host cluster [%s] has been deleted." % self.name
+
+ if self.module.check_mode:
+ msg = "skipping changes due to check mode"
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "host cluster [%s] did not exist." % self.name
+ else:
+ msg = "host cluster [%s] already exists. No modifications done." % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVChostcluster()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_info.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_info.py
new file mode 100644
index 000000000..61f64a97b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_info.py
@@ -0,0 +1,1070 @@
+#!/usr/bin/python
+# Copyright (C) 2024 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+# Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+# Sumit Kumar Gupta <sumit.gupta16@ibm.com>
+# Sandip Gulab Rajbanshi <sandip.rajbanshi@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_info
+short_description: This module gathers various information from the IBM Storage Virtualize family systems
+version_added: "1.0.0"
+description:
+- Gathers the list of specified IBM Storage Virtualize family system
+ entities. These include the list of nodes, pools, volumes, hosts,
+ host clusters, FC ports, iSCSI ports, target port FC, FC consistgrp,
+ vdiskcopy, I/O groups, FC map, FC connectivity, NVMe fabric,
+ array, and system.
+author:
+ - Peng Wang (@wangpww)
+ - Sumit Kumar Gupta (@sumitguptaibm)
+ - Sandip Gulab Rajbanshi (@Sandip-Rajbanshi)
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the
+ Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ objectname:
+ description:
+ - If specified, only the instance with the I(objectname) is returned. If not specified, all the instances are returned.
+ type: str
+ gather_subset:
+ type: list
+ elements: str
+ description:
+ - List of string variables to specify the Storage Virtualize entities
+ for which information is required.
+      - all - lists all Storage Virtualize entities
+        supported by the module.
+ - vol - lists information for VDisks.
+ - pool - lists information for mdiskgrps.
+ - node - lists information for nodes.
+ - iog - lists information for I/O groups.
+ - host - lists information for hosts.
+ - hostvdiskmap - lists all VDisks mapped to host 'objectname'
+      - vdiskhostmap - lists all hosts the VDisk 'objectname' is mapped to
+ - hc - lists information for host clusters.
+ - fc - lists information for FC connectivity.
+ - fcport - lists information for FC ports.
+ - targetportfc - lists information for WWPN which is required to set up
+ FC zoning and to display the current failover status
+ of host I/O ports.
+ - fcmap - lists information for FC maps.
+ - rcrelationship - lists information for remote copy relationships.
+ - fcconsistgrp - displays a concise list or a detailed
+ view of flash copy consistency groups.
+ - rcconsistgrp - displays a concise list or a detailed
+ view of remote copy consistency groups.
+ - iscsiport - lists information for iSCSI ports.
+ - vdiskcopy - lists information for volume copy.
+ - array - lists information for array MDisks.
+ - system - displays the storage system information.
+ - cloudaccount - lists all the configured cloud accounts.
+ - cloudaccountusage - lists the usage information about the configured cloud storage accounts.
+ - cloudimportcandidate - lists information about systems that have data that is stored in the cloud accounts.
+ - ldapserver - lists the most recent details for all configured Lightweight Directory Access Protocol (LDAP) servers.
+ - drive - lists the configuration information and drive vital product data (VPD).
+ - user - lists all the users that are created on the system.
+      - usergroup - lists the user groups that are created on the system.
+ - ownershipgroup - displays the ownership groups that are available in the system.
+ - partnership - lists all the clustered systems (systems) that are associated with the local system.
+ - replicationpolicy - lists all the replication policies on the system.
+ - cloudbackup - lists the volumes that have cloud snapshot enabled and volumes that have cloud snapshots in the cloud account.
+ - cloudbackupgeneration - lists any volume snapshots available on the specified volume. I(objectname) is a mandatory parameter.
+ - snapshotpolicy - lists all the snapshot policies on the system.
+ - snapshotpolicyschedule - lists all snapshot schedules on the system.
+ - volumegroup - lists all volume groups on the system.
+      - volumepopulation - lists the population information about volumes of type clone or thinclone.
+      - volumegrouppopulation - lists the information about volume groups of type clone or thinclone.
+ - volumegroupsnapshotpolicy - lists the snapshot policy attributes associated with a volume group on the system.
+ - volumesnapshot - lists all volume snapshots.
+ - dnsserver - lists the information for any Domain Name System (DNS) servers in the system.
+ - systemcertificate - lists the information about the current system Secure Sockets Layer (SSL) certificate.
+ - truststore - lists the current certificate stores.
+      - sra - checks both the secure remote assistance status and the time of the last login.
+ - syslogserver - lists the syslog servers that are configured on the clustered system.
+ - emailserver - lists the email servers that are configured on the system.
+ - emailuser - lists the Email event notification settings for all Email recipients,
+ an individual Email recipient, or a specified type (local or support) of an Email recipient.
+ - provisioningpolicy - lists the provisioning policies available on the system.
+ - volumegroupsnapshot - lists the snapshot objects available on the system.
+ - callhome - displays the status of the Call Home information that is sent to a server in the Cloud.
+ - ip - lists the currently configured IP addresses.
+ - portset - lists the currently configured portset on the system.
+ - safeguardedpolicy - lists the Safeguarded policies available on the system.
+ - mdisk - displays a concise list or a detailed view of managed disks (MDisks) visible to the system.
+ - safeguardedpolicyschedule - displays the Safeguarded backup schedule that is associated with Safeguarded policies.
+      - eventlog - displays the concise view of the system event log.
+ - enclosurestats - lists the most recent values (averaged) of all enclosure statistics.
+ - enclosurestatshistory - lists the history values of all enclosure statistics including power consumed,
+        temperature in Fahrenheit and temperature in Celsius.
+      - driveclass - lists all drive classes in the system.
+      - security - displays the current system Secure Sockets Layer (SSL) or Transport Layer Security (TLS) security and
+        password rules settings.
+      - partition - displays information for all storage partitions.
+      - volumegroupreplication - displays all the replication information for the volume group.
+      - plugin - displays information about registered plugins.
+      - quorum - displays all the quorum devices that the system uses to store quorum data.
+      - enclosure - displays a summary of the enclosures.
+      - snmpserver - displays a concise list or a detailed view of SNMP servers that are configured on the system.
+ - testldapserver - tests a Lightweight Directory Access Protocol (LDAP) server.
+    choices: [vol, pool, node, iog, host, hostvdiskmap, vdiskhostmap, hc, fcport,
+              iscsiport, fc, fcmap, fcconsistgrp, rcrelationship, rcconsistgrp,
+              vdiskcopy, targetportfc, array, system, cloudaccount, cloudaccountusage,
+              ldapserver, drive, user, usergroup, ownershipgroup,
+              partnership, replicationpolicy, cloudbackup, enclosurestats,
+              cloudbackupgeneration, snapshotpolicy, snapshotpolicyschedule,
+              volumegroup, volumepopulation, volumegrouppopulation, volumegroupsnapshotpolicy,
+              volumesnapshot, dnsserver, systemcertificate, sra, syslogserver,
+              enclosurestatshistory, emailserver, emailuser, provisioningpolicy,
+              volumegroupsnapshot, truststore, callhome, ip, portset, safeguardedpolicy,
+              mdisk, safeguardedpolicyschedule, cloudimportcandidate, eventlog, driveclass,
+              security, partition, volumegroupreplication, plugin, quorum, enclosure,
+              snmpserver, testldapserver, all]
+ default: "all"
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Get volume info
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ gather_subset: vol
+- name: Get volume info
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ objectname: volumename
+ gather_subset: vol
+- name: Get pool info
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ gather_subset: pool
+- name: Get population information about volumes and volumegroups of type clone or thinclone
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ gather_subset: ['volumepopulation','volumegrouppopulation']
+- name: Get all info related to volume 'Volume1'
+ ibm.storage_virtualize.ibm_svc_info:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ gather_subset: vol
+ objectname: Volume1
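+# Assumed example; 'host0' is a placeholder host name.
+- name: List all VDisks mapped to host 'host0'
+  ibm.storage_virtualize.ibm_svc_info:
+    clustername: "{{clustername}}"
+    domain: "{{domain}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    log_path: /tmp/ansible.log
+    gather_subset: hostvdiskmap
+    objectname: host0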
+'''
+
+RETURN = '''
+Array:
+ description:
+ - Data will be populated when I(gather_subset=array) or I(gather_subset=all)
+ - Lists information for array MDisks
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+CallHome:
+ description:
+ - Data will be populated when I(gather_subset=callhome) or I(gather_subset=all)
+ - Displays the status of the Call Home information that is sent to a server in the Cloud
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+CloudAccount:
+ description:
+ - Data will be populated when I(gather_subset=cloudaccount) or I(gather_subset=all)
+ - Lists all the configured cloud accounts
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+CloudAccountUsage:
+ description:
+ - Data will be populated when I(gather_subset=cloudaccountusage) or I(gather_subset=all)
+ - Lists the usage information about the configured cloud storage accounts
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+CloudBackup:
+ description:
+ - Data will be populated when I(gather_subset=cloudbackup) or I(gather_subset=all)
+    - Lists the volumes that have cloud snapshot enabled and volumes that have cloud snapshots in the cloud account
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+CloudBackupGeneration:
+ description:
+ - Data will be populated when I(gather_subset=cloudbackupgeneration)
+    - Lists any volume snapshots available on the specified volume
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+CloudImportCandidate:
+ description:
+ - Data will be populated when I(gather_subset=cloudimportcandidate) or I(gather_subset=all)
+ - Lists information about systems that have data that is stored in the cloud accounts
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+DnsServer:
+ description:
+ - Data will be populated when I(gather_subset=dnsserver) or I(gather_subset=all)
+ - Lists the information for any Domain Name System (DNS) servers in the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Drive:
+ description:
+ - Data will be populated when I(gather_subset=drive) or I(gather_subset=all)
+ - Lists the configuration information and drive vital product data (VPD)
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+EmailServer:
+ description:
+ - Data will be populated when I(gather_subset=emailserver) or I(gather_subset=all)
+ - Lists the Email servers that are configured on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+EmailUser:
+ description:
+ - Data will be populated when I(gather_subset=emailuser) or I(gather_subset=all)
+ - Lists the Email event notification settings for all Email recipients,
+ an individual Email recipient, or a specified type (local or support) of Email recipient
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+FCConnectivitie:
+ description:
+ - Data will be populated when I(gather_subset=fc) or I(gather_subset=all)
+ - Lists information for FC connectivity
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+FCConsistgrp:
+ description:
+ - Data will be populated when I(gather_subset=fcconsistgrp) or I(gather_subset=all)
+ - Displays a concise list or a detailed view of flash copy consistency groups
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+FCMap:
+ description:
+ - Data will be populated when I(gather_subset=fcmap) or I(gather_subset=all)
+ - Lists information for FC maps
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+FCPort:
+ description:
+ - Data will be populated when I(gather_subset=fcport) or I(gather_subset=all)
+ - Lists information for FC ports
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Host:
+ description:
+ - Data will be populated when I(gather_subset=host) or I(gather_subset=all)
+ - Lists information for hosts
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+HostCluster:
+ description:
+ - Data will be populated when I(gather_subset=hc) or I(gather_subset=all)
+ - Lists information for host clusters
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+HostVdiskMap:
+ description:
+ - Data will be populated when I(gather_subset=hostvdiskmap) or I(gather_subset=all)
+ - Lists all VDisks mapped to host 'objectname'
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+IOGroup:
+ description:
+ - Data will be populated when I(gather_subset=iog) or I(gather_subset=all)
+ - Lists information for I/O groups
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+IP:
+ description:
+ - Data will be populated when I(gather_subset=ip) or I(gather_subset=all)
+ - Lists the currently configured IP addresses
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+LdapServer:
+ description:
+ - Data will be populated when I(gather_subset=ldapserver) or I(gather_subset=all)
+ - Lists the most recent details for all configured Lightweight Directory Access Protocol (LDAP) servers
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Mdisk:
+ description:
+ - Data will be populated when I(gather_subset=mdisk) or I(gather_subset=all)
+ - Displays a concise list or a detailed view of managed disks (MDisks) visible to the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Node:
+ description:
+ - Data will be populated when I(gather_subset=node) or I(gather_subset=all)
+ - Lists information for nodes
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Ownershipgroup:
+ description:
+ - Data will be populated when I(gather_subset=ownershipgroup) or I(gather_subset=all)
+ - Displays the ownership groups that are available in the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Partnership:
+ description:
+ - Data will be populated when I(gather_subset=partnership) or I(gather_subset=all)
+ - Lists all the clustered systems (systems) that are associated with the local system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Pool:
+ description:
+ - Data will be populated when I(gather_subset=pool) or I(gather_subset=all)
+ - Lists information for mdiskgrps
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Portset:
+ description:
+ - Data will be populated when I(gather_subset=portset) or I(gather_subset=all)
+ - Lists the currently configured portset on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+ProvisioningPolicy:
+ description:
+ - Data will be populated when I(gather_subset=provisioningpolicy) or I(gather_subset=all)
+ - Lists the provisioning policies available on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+RCConsistgrp:
+ description:
+ - Data will be populated when I(gather_subset=rcconsistgrp) or I(gather_subset=all)
+ - Displays a concise list or a detailed view of remote copy consistency groups
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+RemoteCopy:
+ description:
+ - Data will be populated when I(gather_subset=rcrelationship) or I(gather_subset=all)
+ - Lists information for remote copy relationships
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+ReplicationPolicy:
+ description:
+ - Data will be populated when I(gather_subset=replicationpolicy) or I(gather_subset=all)
+ - Lists all the replication policies on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+SafeguardedPolicy:
+ description:
+ - Data will be populated when I(gather_subset=safeguardedpolicy) or I(gather_subset=all)
+ - Lists the Safeguarded policies available on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+SafeguardedSchedule:
+ description:
+ - Data will be populated when I(gather_subset=safeguardedpolicyschedule) or I(gather_subset=all)
+ - Displays the Safeguarded backup schedule that is associated with Safeguarded policies
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+SnapshotPolicy:
+ description:
+ - Data will be populated when I(gather_subset=snapshotpolicy) or I(gather_subset=all)
+ - Lists all the snapshot policies on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+SnapshotSchedule:
+ description:
+ - Data will be populated when I(gather_subset=snapshotpolicyschedule) or I(gather_subset=all)
+ - Lists all snapshot schedules on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Sra:
+ description:
+ - Data will be populated when I(gather_subset=sra) or I(gather_subset=all)
+    - Checks both the secure remote assistance status and the time of the last login
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+SysLogServer:
+ description:
+ - Data will be populated when I(gather_subset=syslogserver) or I(gather_subset=all)
+ - Lists the syslog servers that are configured on the clustered system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+System:
+ description:
+ - Data will be populated when I(gather_subset=system) or I(gather_subset=all)
+ - Displays the storage system information
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+SystemCert:
+ description:
+ - Data will be populated when I(gather_subset=systemcertificate) or I(gather_subset=all)
+ - Lists the information about the current system Secure Sockets Layer (SSL) certificate
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+TargetPortFC:
+ description:
+ - Data will be populated when I(gather_subset=targetportfc) or I(gather_subset=all)
+ - Lists information for WWPN which is required to set up FC zoning and to display
+ the current failover status of host I/O ports
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+TrustStore:
+ description:
+ - Data will be populated when I(gather_subset=truststore) or I(gather_subset=all)
+ - Lists the current certificate stores
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+User:
+ description:
+ - Data will be populated when I(gather_subset=user) or I(gather_subset=all)
+ - Lists all the users that are created on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+UserGrp:
+ description:
+ - Data will be populated when I(gather_subset=usergroup) or I(gather_subset=all)
+    - Lists the user groups that are created on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VdiskCopy:
+ description:
+ - Data will be populated when I(gather_subset=vdiskcopy) or I(gather_subset=all)
+ - Lists information for volume copy
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VdiskHostMap:
+ description:
+ - Data will be populated when I(gather_subset=vdiskhostmap) or I(gather_subset=all)
+ - Lists all hosts the VDisk 'objectname' is mapped to
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Volume:
+ description:
+ - Data will be populated when I(gather_subset=vol) or I(gather_subset=all)
+ - Lists information for VDisks
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VolumeGroup:
+ description:
+ - Data will be populated when I(gather_subset=volumegroup) or I(gather_subset=all)
+ - Lists all volume groups on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VolumePopulation:
+ description:
+ - Data will be populated when I(gather_subset=volumepopulation) or I(gather_subset=all)
+ - Lists information about volumes of type clone or thinclone
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VolumeGroupPopulation:
+ description:
+ - Data will be populated when I(gather_subset=volumegrouppopulation) or I(gather_subset=all)
+ - Lists information about volume groups of type clone or thinclone including source and in-progress restore
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VolumeGroupSnapshot:
+ description:
+ - Data will be populated when I(gather_subset=volumegroupsnapshot) or I(gather_subset=all)
+ - Lists the snapshot objects available on the system based on volume group
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VolumeGroupSnapshotPolicy:
+ description:
+ - Data will be populated when I(gather_subset=volumegroupsnapshotpolicy) or I(gather_subset=all)
+    - Lists the snapshot policy attributes associated with volume groups on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+VolumeSnapshot:
+ description:
+ - Data will be populated when I(gather_subset=volumesnapshot) or I(gather_subset=all)
+ - Lists all volume snapshots
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+iSCSIPort:
+ description:
+ - Data will be populated when I(gather_subset=iscsiport) or I(gather_subset=all)
+ - Lists information for iSCSI ports
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+EventLog:
+ description:
+ - Data will be populated when I(gather_subset=eventlog) or I(gather_subset=all)
+ - Lists information about the system event log
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+EnclosureStats:
+ description:
+ - Data will be populated when I(gather_subset=enclosurestats) or I(gather_subset=all)
+ - Lists the most recent values (averaged) of all enclosure statistics.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+EnclosureStatsHistory:
+ description:
+ - Data will be populated when I(gather_subset=enclosurestatshistory) or I(gather_subset=all)
+ - Lists the history values of all enclosure statistics including power consumed,
+      temperature in Fahrenheit and temperature in Celsius.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+DriveClass:
+ description:
+ - Data will be populated when I(gather_subset=driveclass) or I(gather_subset=all)
+    - Lists all drive classes in the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Security:
+ description:
+ - Data will be populated when I(gather_subset=security) or I(gather_subset=all)
+ - Displays current security settings of the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Partition:
+ description:
+ - Data will be populated when I(gather_subset=partition) or I(gather_subset=all)
+ - Displays all storage partitions
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Plugin:
+ description:
+ - Data will be populated when I(gather_subset=plugin) or I(gather_subset=all)
+ - Displays all registered plugins
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Volumegroupreplication:
+ description:
+ - Data will be populated when I(gather_subset=volumegroupreplication) or I(gather_subset=all)
+    - Displays all replication information for the volume group
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Quorum:
+ description:
+ - Data will be populated when I(gather_subset=quorum) or I(gather_subset=all)
+    - Lists the quorum devices that the system uses to store quorum data.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Enclosure:
+ description:
+ - Data will be populated when I(gather_subset=enclosure) or I(gather_subset=all)
+ - Displays a summary of the enclosures.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Snmpserver:
+ description:
+ - Data will be populated when I(gather_subset=snmpserver) or I(gather_subset=all)
+ - Display a concise list or a detailed view of SNMP servers that are configured on the system
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+Testldapserver:
+ description:
+ - Data will be populated when I(gather_subset=testldapserver)
+ - Tests a Lightweight Directory Access Protocol (LDAP) server.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{...}]
+'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCGatherInfo(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ objectname=dict(type='str'),
+ gather_subset=dict(type='list', elements='str', required=False,
+ default=['all'],
+ choices=['vol',
+ 'pool',
+ 'node',
+ 'iog',
+ 'host',
+ 'hostvdiskmap',
+ 'vdiskhostmap',
+ 'hc',
+ 'fc',
+ 'fcport',
+ 'targetportfc',
+ 'iscsiport',
+ 'fcmap',
+ 'rcrelationship',
+ 'fcconsistgrp',
+ 'rcconsistgrp',
+ 'vdiskcopy',
+ 'array',
+ 'system',
+ 'cloudaccount',
+ 'cloudaccountusage',
+ 'cloudimportcandidate',
+ 'ldapserver',
+ 'drive',
+ 'user',
+ 'usergroup',
+ 'ownershipgroup',
+ 'partnership',
+ 'replicationpolicy',
+ 'cloudbackup',
+ 'cloudbackupgeneration',
+ 'snapshotpolicy',
+ 'snapshotpolicyschedule',
+ 'volumegroup',
+ 'volumepopulation',
+ 'volumegrouppopulation',
+ 'volumegroupsnapshotpolicy',
+ 'volumesnapshot',
+ 'dnsserver',
+ 'systemcertificate',
+ 'truststore',
+ 'sra',
+ 'syslogserver',
+ 'emailserver',
+ 'emailuser',
+ 'provisioningpolicy',
+ 'volumegroupsnapshot',
+ 'callhome',
+ 'ip',
+ 'portset',
+ 'safeguardedpolicy',
+ 'mdisk',
+ 'safeguardedpolicyschedule',
+ 'eventlog',
+ 'enclosurestats',
+ 'enclosurestatshistory',
+ 'driveclass',
+ 'security',
+ 'partition',
+ 'plugin',
+ 'volumegroupreplication',
+ 'quorum',
+ 'enclosure',
+ 'snmpserver',
+ 'testldapserver',
+ 'all'
+ ]),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ self.log = get_logger(self.__class__.__name__, log_path)
+ self.objectname = self.module.params['objectname']
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def validate(self, subset):
+ if not self.objectname:
+            self.module.fail_json(msg='The following parameter is mandatory to execute {0}: objectname'.format(subset))
+
+ @property
+ def cloudbackupgeneration(self):
+ return self.restapi.svc_obj_info(
+ cmd='lsvolumebackupgeneration',
+ cmdopts={'volume': self.objectname},
+ cmdargs=None
+ )
+
+ @property
+ def enclosurestatshistory(self):
+ return self.restapi.svc_obj_info(
+ cmd='lsenclosurestats',
+ cmdopts={'history': 'power_w:temp_c:temp_f'},
+ cmdargs=[self.objectname]
+ )
+
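+    # The properties above exist because their CLI calls need extra cmdopts;
+    # get_list() routes these subsets through getattr() (the 'exceptions' set)
+    # instead of the generic svc_obj_info() path.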
+ def get_list(self, subset, op_key, cmd, validate):
+ try:
+ if validate:
+ self.validate(subset)
+ output = {}
+ exceptions = {'cloudbackupgeneration', 'enclosurestatshistory'}
+ if subset in exceptions:
+ output[op_key] = getattr(self, subset)
+ else:
+ cmdargs = [self.objectname] if self.objectname else None
+ output[op_key] = self.restapi.svc_obj_info(cmd=cmd,
+ cmdopts=None,
+ cmdargs=cmdargs)
+            self.log.info('Successfully listed %s info '
+                          'from cluster %s', subset,
+                          self.module.params['clustername'])
+ return output
+ except Exception as e:
+ msg = 'Get %s info from cluster %s failed with error %s ' % \
+ (subset, self.module.params['clustername'], str(e))
+ self.log.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def apply(self):
+ subset = self.module.params['gather_subset']
+ if self.objectname and len(subset) != 1:
+ msg = ("objectname(%s) is specified while gather_subset(%s) is not "
+ "one of %s" % (self.objectname, self.subset, all))
+ self.module.fail_json(msg=msg)
+ if len(subset) == 0 or 'all' in subset:
+ self.log.info("The default value for gather_subset is all")
+
+ result = {
+ 'Volume': [],
+ 'Pool': [],
+ 'Node': [],
+ 'IOGroup': [],
+ 'Host': [],
+ 'HostVdiskMap': [],
+ 'VdiskHostMap': [],
+ 'HostCluster': [],
+ 'FCConnectivitie': [],
+ 'FCConsistgrp': [],
+ 'RCConsistgrp': [],
+ 'VdiskCopy': [],
+ 'FCPort': [],
+ 'TargetPortFC': [],
+ 'iSCSIPort': [],
+ 'FCMap': [],
+ 'RemoteCopy': [],
+ 'Array': [],
+ 'System': [],
+ 'CloudAccount': [],
+ 'CloudAccountUsage': [],
+ 'CloudImportCandidate': [],
+ 'LdapServer': [],
+ 'Drive': [],
+ 'User': [],
+ 'Partnership': [],
+ 'ReplicationPolicy': [],
+ 'SnapshotPolicy': [],
+ 'VolumeGroup': [],
+ 'VolumePopulation': [],
+ 'VolumeGroupPopulation': [],
+ 'SnapshotSchedule': [],
+ 'VolumeGroupSnapshotPolicy': [],
+ 'VolumeSnapshot': [],
+ 'DnsServer': [],
+ 'SystemCert': [],
+ 'TrustStore': [],
+ 'Sra': [],
+ 'SysLogServer': [],
+ 'UserGrp': [],
+ 'EmailServer': [],
+ 'EmailUser': [],
+ 'CloudBackup': [],
+ 'CloudBackupGeneration': [],
+ 'ProvisioningPolicy': [],
+ 'VolumeGroupSnapshot': [],
+ 'CallHome': [],
+ 'IP': [],
+ 'Ownershipgroup': [],
+ 'Portset': [],
+ 'SafeguardedPolicy': [],
+ 'Mdisk': [],
+ 'SafeguardedSchedule': [],
+ 'EventLog': [],
+ 'DriveClass': [],
+ 'Security': [],
+ 'Partition': [],
+ 'Plugin': [],
+ 'Volumegroupreplication': [],
+ 'Quorum': [],
+ 'Enclosure': [],
+ 'Snmpserver': [],
+ 'Testldapserver': []
+ }
+
+ cmd_mappings = {
+ 'vol': ('Volume', 'lsvdisk', False, None),
+ 'pool': ('Pool', 'lsmdiskgrp', False, None),
+ 'node': ('Node', 'lsnode', False, None),
+ 'iog': ('IOGroup', 'lsiogrp', False, None),
+ 'host': ('Host', 'lshost', False, None),
+ 'hostvdiskmap': ('HostVdiskMap', 'lshostvdiskmap', False, None),
+ 'vdiskhostmap': ('VdiskHostMap', 'lsvdiskhostmap', True, None),
+ 'hc': ('HostCluster', 'lshostcluster', False, '7.7.1.0'),
+ 'fc': ('FCConnectivitie', 'lsfabric', False, None),
+ 'fcport': ('FCPort', 'lsportfc', False, None),
+ 'iscsiport': ('iSCSIPort', 'lsportip', False, None),
+ 'fcmap': ('FCMap', 'lsfcmap', False, None),
+ 'rcrelationship': ('RemoteCopy', 'lsrcrelationship', False, None),
+ 'fcconsistgrp': ('FCConsistgrp', 'lsfcconsistgrp', False, None),
+ 'rcconsistgrp': ('RCConsistgrp', 'lsrcconsistgrp', False, None),
+ 'vdiskcopy': ('VdiskCopy', 'lsvdiskcopy', False, None),
+ 'targetportfc': ('TargetPortFC', 'lstargetportfc', False, '7.7.0.0'),
+ 'array': ('Array', 'lsarray', False, None),
+ 'system': ('System', 'lssystem', False, '6.3.0.0'),
+ 'cloudaccount': ('CloudAccount', 'lscloudaccount', False, '7.8.0.0'),
+ 'cloudaccountusage': ('CloudAccountUsage', 'lscloudaccountusage', False, '7.8.0.0'),
+ 'cloudimportcandidate': ('CloudImportCandidate', 'lscloudaccountimportcandidate', False, '7.8.0.0'),
+ 'ldapserver': ('LdapServer', 'lsldapserver', False, '6.3.0.0'),
+ 'drive': ('Drive', 'lsdrive', False, None),
+ 'user': ('User', 'lsuser', False, None),
+ 'usergroup': ('UserGrp', 'lsusergrp', False, None),
+ 'ownershipgroup': ('Ownershipgroup', 'lsownershipgroup', False, '8.3.0.0'),
+ 'partnership': ('Partnership', 'lspartnership', False, '6.3.0.0'),
+ 'replicationpolicy': ('ReplicationPolicy', 'lsreplicationpolicy', False, '8.5.2.0'),
+ 'cloudbackup': ('CloudBackup', 'lsvolumebackup', False, '7.8.0.0'),
+ 'cloudbackupgeneration': ('CloudBackupGeneration', 'lsvolumebackupgeneration', True, '7.8.0.0'),
+ 'snapshotpolicy': ('SnapshotPolicy', 'lssnapshotpolicy', False, '8.5.1.0'),
+ 'snapshotpolicyschedule': ('SnapshotSchedule', 'lssnapshotschedule', False, '8.5.1.0'),
+ 'volumegroup': ('VolumeGroup', 'lsvolumegroup', False, '7.8.0.0'),
+ 'volumepopulation': ('VolumePopulation', 'lsvolumepopulation', False, '8.5.1.0'),
+ 'volumegrouppopulation': ('VolumeGroupPopulation', 'lsvolumegrouppopulation', False, '8.5.1.0'),
+ 'volumegroupsnapshotpolicy': ('VolumeGroupSnapshotPolicy', 'lsvolumegroupsnapshotpolicy', False, '8.5.1.0'),
+ 'volumesnapshot': ('VolumeSnapshot', 'lsvolumesnapshot', False, '8.5.1.0'),
+ 'dnsserver': ('DnsServer', 'lsdnsserver', False, '7.8.0.0'),
+ 'systemcertificate': ('SystemCert', 'lssystemcert', False, '7.6.0.0'),
+ 'truststore': ('TrustStore', 'lstruststore', False, '8.5.1.0'),
+ 'sra': ('Sra', 'lssra', False, '7.7.0.0'),
+ 'syslogserver': ('SysLogServer', 'lssyslogserver', False, None),
+ 'emailserver': ('EmailServer', 'lsemailserver', False, None),
+ 'emailuser': ('EmailUser', 'lsemailuser', False, None),
+ 'provisioningpolicy': ('ProvisioningPolicy', 'lsprovisioningpolicy', False, '8.4.1.0'),
+ 'volumegroupsnapshot': ('VolumeGroupSnapshot', 'lsvolumegroupsnapshot', False, '8.5.1.0'),
+ 'callhome': ('CallHome', 'lscloudcallhome', False, '8.2.1.0'),
+ 'ip': ('IP', 'lsip', False, '8.4.2.0'),
+ 'portset': ('Portset', 'lsportset', False, '8.4.2.0'),
+ 'safeguardedpolicy': ('SafeguardedPolicy', 'lssafeguardedpolicy', False, '8.4.2.0'),
+ 'mdisk': ('Mdisk', 'lsmdisk', False, None),
+ 'safeguardedpolicyschedule': ('SafeguardedSchedule', 'lssafeguardedschedule', False, '8.4.2.0'),
+ 'eventlog': ('EventLog', 'lseventlog', False, None),
+ 'enclosurestats': ('EnclosureStats', 'lsenclosurestats', False, None),
+ 'enclosurestatshistory': ('EnclosureStatsHistory', 'lsenclosurestats -history power_w:temp_c:temp_f', True, None),
+ 'driveclass': ('DriveClass', 'lsdriveclass', False, '7.6.0.0'),
+ 'security': ('Security', 'lssecurity', False, '7.4.0.0'),
+ 'partition': ('Partition', 'lspartition', False, '8.6.1.0'),
+ 'plugin': ('Plugin', 'lsplugin', False, '8.6.0.0'),
+ 'volumegroupreplication': ('Volumegroupreplication', 'lsvolumegroupreplication', False, '8.5.2.0'),
+ 'quorum': ('Quorum', 'lsquorum', False, None),
+ 'enclosure': ('Enclosure', 'lsenclosure', False, None),
+ 'snmpserver': ('Snmpserver', 'lssnmpserver', False, None),
+ 'testldapserver': ('Testldapserver', 'testldapserver', False, '6.3.0.0')
+ }
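+ # Each tuple above is (result key, CLI list command, requires an object
+ # argument, minimum code level); semantics inferred from how get_list()
+ # and the filtering loop below consume the fields.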
+
+ if subset == ['all']:
+ current_set = cmd_mappings.keys()
+ else:
+ current_set = subset
+ build_version = ''
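+ # For subset 'all', skip commands that need a per-object argument and
+ # commands whose minimum code level is newer than the system's
+ # code_level, compared digit by digit (for example, an 8.5.1.0 command
+ # is skipped on an 8.4.2.0 system).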
+ for key in current_set:
+ value_tuple = cmd_mappings[key]
+ if subset == ['all']:
+ version = value_tuple[3]
+ if value_tuple[2]:
+ continue
+ elif not version:
+ pass
+ else:
+ if build_version == '':
+ system_info = self.restapi.svc_obj_info(cmd='lssystem', cmdargs=[], cmdopts=None)
+ build_version = (system_info['code_level'].split(" ")[0]).split(".")
+ version = value_tuple[3].split('.')
+ flag = True
+ for idx in range(4):
+ if int(version[idx]) > int(build_version[idx]):
+ flag = False
+ elif int(version[idx]) < int(build_version[idx]):
+ break
+ if not flag:
+ continue
+ op = self.get_list(key, *value_tuple[:3])
+ result.update(op)
+
+ self.module.exit_json(**result)
+
+
+def main():
+ v = IBMSVCGatherInfo()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log.debug("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_initial_setup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_initial_setup.py
new file mode 100644
index 000000000..10447be32
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_initial_setup.py
@@ -0,0 +1,599 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_initial_setup
+short_description: This module allows users to manage the initial setup configuration on IBM Storage Virtualize family systems
+version_added: "1.7.0"
+description:
+ - Ansible interface to perform various initial system configuration
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ system_name:
+ description:
+ - Specifies system name.
+ type: str
+ dnsname:
+ description:
+ - Specifies a unique name for the system DNS server being created.
+ - A maximum of two DNS servers can be configured. The user must provide the complete list of DNS servers to be configured.
+ type: list
+ elements: str
+ dnsip:
+ description:
+ - Specifies the DNS server Internet Protocol (IP) address.
+ type: list
+ elements: str
+ ntpip:
+ description:
+ - Specifies the IPv4 address or fully qualified domain name (FQDN) for the Network Time Protocol (NTP) server.
+ - To remove an already configured NTP IP, the user must specify 0.0.0.0.
+ type: str
+ time:
+ description:
+ - Specifies the time to which the system must be set.
+ - This value must be in the following format MMDDHHmmYYYY (where M is month, D is day, H is hour, m is minute, and Y is year).
+ type: str
+ timezone:
+ description:
+ - Specifies the time zone to set for the system.
+ type: str
+ license_key:
+ description:
+ - Provides the license key to activate a feature. The key contains 16 hexadecimal characters organized in four groups
+ of four characters, with each group separated by a hyphen (such as 0123-4567-89AB-CDEF).
+ type: list
+ elements: str
+ remote:
+ description:
+ - Changes system licensing for remote-copy functions such as Metro Mirror, Global Mirror, and HyperSwap.
+ - Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
+ internal and external enclosures that the user has licensed on the system.
+ There must be an enclosure license for all enclosures.
+ type: int
+ virtualization:
+ description:
+ - Changes system licensing for the Virtualization function.
+ - Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
+ storage capacity units (SCUs) that the user is licensed to virtualize across tiers of storage on the system, or
+ specify the number of enclosures of external storage that the user is authorized to use.
+ type: int
+ compression:
+ description:
+ - Changes system licensing for the compression function.
+ - Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
+ storage capacity units (SCUs) that the user is licensed to virtualize across tiers of storage on the system, or
+ specify the total number of internal and external enclosures that the user has licensed on the system.
+ type: int
+ flash:
+ description:
+ - Changes system licensing for the FlashCopy function.
+ - Depending on the type of system, specify a capacity value in terabytes (TB) or specify the total number of
+ internal and external enclosures for the FlashCopy function.
+ type: int
+ cloud:
+ description:
+ - Specifies the number of enclosures for the transparent cloud tiering function.
+ type: int
+ easytier:
+ description:
+ - Specifies the number of enclosures on which the user can run Easy Tier.
+ type: int
+ physical_flash:
+ description:
+ - For physical disk licensing, this parameter enables or disables the FlashCopy function.
+ type: str
+ choices: [ 'on', 'off' ]
+ default: 'off'
+ encryption:
+ description:
+ - Specifies whether the encryption license function is enabled or disabled.
+ type: str
+ choices: [ 'on', 'off' ]
+author:
+ - Shilpi Jain (@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Initial configuration on FlashSystem 9200
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ system_name: cluster_test_0
+ time: 101009142021
+ timezone: 200
+ remote: 50
+ virtualization: 50
+ flash: 50
+ license_key:
+ - 0123-4567-89AB-CDEF
+ - 8921-4567-89AB-GHIJ
+- name: Add DNS servers
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ system_name: cluster_test_
+ dnsname:
+ - dns_01
+ - dns_02
+ dnsip:
+ - '1.1.1.1'
+ - '2.2.2.2'
+- name: Delete dns_02 server
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ system_name: cluster_test_
+ dnsname:
+ - dns_01
+ dnsip:
+ - '1.1.1.1'
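+# A hypothetical additional example (not part of the original documentation),
+# assuming an NTP server is reachable at the address shown:
+- name: Configure an NTP server and time zone only
+ ibm.storage_virtualize.ibm_svc_initial_setup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ ntpip: '9.9.9.9'
+ timezone: 200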
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCInitialSetup(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ system_name=dict(type='str'),
+ dnsname=dict(type='list', elements='str'),
+ dnsip=dict(type='list', elements='str'),
+ ntpip=dict(type='str'),
+ time=dict(type='str'),
+ timezone=dict(type='str'),
+ license_key=dict(type='list', elements='str', no_log=True),
+ remote=dict(type='int'),
+ virtualization=dict(type='int'),
+ flash=dict(type='int'),
+ compression=dict(type='int'),
+ cloud=dict(type='int'),
+ easytier=dict(type='int'),
+ physical_flash=dict(type='str', default='off', choices=['on', 'off']),
+ encryption=dict(type='str', choices=['on', 'off'])
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ self.system_data = ""
+ self.changed = False
+ self.message = ""
+
+ # Optional
+ self.systemname = self.module.params.get('system_name', '')
+ self.dnsname = self.module.params.get('dnsname', '')
+ self.dnsip = self.module.params.get('dnsip', '')
+ self.ntpip = self.module.params.get('ntpip', '')
+ self.time = self.module.params.get('time', '')
+ self.timezone = self.module.params.get('timezone', '')
+
+ # license related parameters
+ self.license_key = self.module.params.get('license_key', '')
+ self.remote = self.module.params.get('remote', '')
+ self.virtualization = self.module.params.get('virtualization', '')
+ self.compression = self.module.params.get('compression', '')
+ self.flash = self.module.params.get('flash', '')
+ self.cloud = self.module.params.get('cloud', '')
+ self.easytier = self.module.params.get('easytier', '')
+ self.physical_flash = self.module.params.get('physical_flash', '')
+ self.encryption = self.module.params.get('encryption', '')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.time and self.ntpip:
+ self.module.fail_json(msg='Parameters [ntpip] and [time] are mutually exclusive; provide only one')
+
+ if self.dnsname and self.dnsip:
+ if len(self.dnsname) != len(self.dnsip):
+ self.module.fail_json(msg='To configure DNS, the number of DNS server names must match the number of DNS IPs.')
+
+ def get_system_info(self):
+ self.log("Entering function get_system_info")
+ self.system_data = self.restapi.svc_obj_info(cmd='lssystem', cmdopts=None, cmdargs=None)
+ return self.system_data
+
+ def systemname_update(self):
+ cmd = 'chsystem'
+ cmdopts = {}
+ cmdopts['name'] = self.systemname
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ # Any error will have been raised in svc_run_command
+ self.changed = True
+ self.log("System Name: %s updated", cmdopts)
+ self.message += " System name [%s] updated." % self.systemname
+
+ def ntp_update(self, ip):
+ cmd = 'chsystem'
+ cmdopts = {}
+ cmdopts['ntpip'] = ip
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ # Any error will have been raised in svc_run_command
+ self.changed = True
+ self.log("NTP IP: %s updated", cmdopts)
+ if self.ntpip:
+ self.message += " NTP IP [%s] updated." % self.ntpip
+
+ def systemtime_update(self):
+ cmd = 'setsystemtime'
+ cmdopts = {}
+ cmdopts['time'] = self.time
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ # Any error will have been raised in svc_run_command
+ self.changed = True
+ self.log("Time: %s updated", self.time)
+ self.message += " Time [%s] updated." % self.time
+
+ def timezone_update(self):
+ cmd = 'settimezone'
+ cmdopts = {}
+ cmdopts['timezone'] = self.timezone
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ # Any error will have been raised in svc_run_command
+ # chhost does not output anything when successful.
+ self.changed = True
+ self.log("Properties: Time zone %s updated", self.timezone)
+ self.message += " Timezone [%s] updated." % self.timezone
+
+ def system_update(self, data):
+ name_change_required = False
+ ntp_change_required = False
+ time_change_required = False
+ timezone_change_required = False
+ tz = (None, None)
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if self.systemname and self.systemname != data['name']:
+ self.log("Name change detected")
+ name_change_required = True
+ if self.ntpip and self.ntpip != data['cluster_ntp_IP_address']:
+ self.log("NTP change detected")
+ ntp_change_required = True
+ if self.time and data['cluster_ntp_IP_address'] is not None:
+ self.log("TIME change detected, clearing NTP IP")
+ ntp_change_required = True
+ if self.time:
+ self.log("TIME change detected")
+ time_change_required = True
+ if data['time_zone']:
+ tz = data['time_zone'].split(" ", 1)
+ if self.timezone and (tz[0] != self.timezone):
+ timezone_change_required = True
+
+ if name_change_required:
+ self.systemname_update()
+ if ntp_change_required:
+ self.log("updating system properties '%s, %s'", self.systemname, self.ntpip)
+ if self.ntpip:
+ ip = self.ntpip
+ if self.time:
+ ip = '0.0.0.0'
+ self.ntp_update(ip)
+
+ if time_change_required:
+ self.systemtime_update()
+
+ if timezone_change_required:
+ self.timezone_update()
+
+ def get_existing_dnsservers(self):
+ merged_result = []
+
+ data = self.restapi.svc_obj_info(cmd='lsdnsserver', cmdopts=None, cmdargs=None)
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.append(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def dns_configure(self):
+ dns_add_remove = False
+ modify = {}
+ existing_dns = {}
+ existing_dns_server = []
+ existing_dns_ip = []
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ dns_data = self.get_existing_dnsservers()
+ self.log("dns_data=%s", dns_data)
+
+ if (self.dnsip and self.dnsname) or (self.dnsip == "" and self.dnsname == ""):
+ for server in dns_data:
+ existing_dns_server.append(server['name'])
+ existing_dns_ip.append(server['IP_address'])
+ existing_dns[server['name']] = server['IP_address']
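+ # dnsname and dnsip are paired positionally; a name that already
+ # exists with a different IP is updated below via 'chdnsserver'.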
+ for name, ip in zip(self.dnsname, self.dnsip):
+ if name == 'None':
+ self.log(" Empty DNS configuration is provided.")
+ return
+ if name in existing_dns:
+ if existing_dns[name] != ip:
+ self.log("update, diff IP.")
+ modify[name] = ip
+ else:
+ self.log("no update, same IP.")
+
+ if (set(existing_dns_server)).symmetric_difference(set(self.dnsname)):
+ dns_add_remove = True
+
+ if modify:
+ for item in modify:
+ self.restapi.svc_run_command(
+ 'chdnsserver',
+ {'ip': modify[item]}, [item]
+ )
+ self.changed = True
+ self.message += " DNS %s modified." % modify
+
+ if dns_add_remove:
+ to_be_added, to_be_removed = False, False
+ to_be_removed = list(set(existing_dns_server) - set(self.dnsname))
+ if to_be_removed:
+ for item in to_be_removed:
+ self.restapi.svc_run_command(
+ 'rmdnsserver', None,
+ [item]
+ )
+ self.changed = True
+ self.message += " DNS server %s removed." % to_be_removed
+
+ to_be_added = list(set(self.dnsname) - set(existing_dns_server))
+ to_be_added_ip = list(set(self.dnsip) - set(existing_dns_ip))
+ if any(to_be_added):
+ for dns_name, dns_ip in zip(to_be_added, to_be_added_ip):
+ if dns_name:
+ self.log('%s %s', dns_name, dns_ip)
+ self.restapi.svc_run_command(
+ 'mkdnsserver',
+ {'name': dns_name, 'ip': dns_ip}, cmdargs=None
+ )
+ self.changed = True
+ self.message += " DNS server %s added." % to_be_added
+ elif not modify:
+ self.log("No DNS Changes")
+
+ def license_probe(self):
+ props = []
+
+ cmd = 'lslicense'
+ cmdopts = {}
+ data = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ if self.remote and int(data['license_remote']) != self.remote:
+ props += ['remote']
+ if self.virtualization and int(data['license_virtualization']) != self.virtualization:
+ props += ['virtualization']
+ if self.compression:
+ if (self.system_data['product_name'] == "IBM Storwize V7000") or (self.system_data['product_name'] == "IBM FlashSystem 7200"):
+ if (int(data['license_compression_enclosures']) != self.compression):
+ self.log("license_compression_enclosure=%d", int(data['license_compression_enclosures']))
+ props += ['compression']
+ else:
+ if (int(data['license_compression_capacity']) != self.compression):
+ self.log("license_compression_capacity=%d", int(data['license_compression_capacity']))
+ props += ['compression']
+ if self.flash and int(data['license_flash']) != self.flash:
+ props += ['flash']
+ if self.cloud and int(data['license_cloud_enclosures']) != self.cloud:
+ props += ['cloud']
+ if self.easytier and int(data['license_easy_tier']) != self.easytier:
+ props += ['easytier']
+ if self.physical_flash and data['license_physical_flash'] != self.physical_flash:
+ props += ['physical_flash']
+
+ self.log("props: %s", props)
+ return props
+
+ def license_update(self, modify):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chlicense'
+
+ for license in modify:
+ cmdopts = {}
+ cmdopts[license] = getattr(self, license)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ self.changed = True if modify else False
+
+ if self.encryption:
+ cmdopts = {}
+ cmdopts['encryption'] = self.encryption
+ self.changed = True
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ self.log("Licensed functions %s updated", modify)
+ self.message += " Licensed functions %s updated." % modify
+
+ def license_key_update(self):
+ existing_license_keys = []
+ license_id_pairs = {}
+ license_add_remove = False
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ for key in self.license_key:
+ if key == 'None':
+ self.log(" Empty License key list provided")
+ return
+
+ cmd = 'lsfeature'
+ cmdopts = {}
+ feature_list = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ for feature in feature_list:
+ existing_license_keys.append(feature['license_key'])
+ license_id_pairs[feature['license_key']] = feature['id']
+ self.log("existing licenses=%s, license_id_pairs=%s", existing_license_keys, license_id_pairs)
+
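+ # A non-empty symmetric difference means the requested keys and the
+ # active keys diverge, so features are activated/deactivated below.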
+ if (set(existing_license_keys)).symmetric_difference(set(self.license_key)):
+ license_add_remove = True
+
+ if license_add_remove:
+ deactivate_license_keys, activate_license_keys = False, False
+ deactivate_license_keys = list(set(existing_license_keys) - set(self.license_key))
+ self.log('deactivate_license_keys %s ', deactivate_license_keys)
+ if deactivate_license_keys:
+ for item in deactivate_license_keys:
+ if item:
+ self.log('deactivating feature id %s', license_id_pairs[item])
+ self.restapi.svc_run_command(
+ 'deactivatefeature',
+ None, [license_id_pairs[item]]
+ )
+ self.changed = True
+ self.log('%s deactivated', deactivate_license_keys)
+ self.message += " License %s deactivated." % deactivate_license_keys
+
+ activate_license_keys = list(set(self.license_key) - set(existing_license_keys))
+ self.log('activate_license_keys %s ', activate_license_keys)
+ if activate_license_keys:
+ for item in activate_license_keys:
+ if item:
+ self.restapi.svc_run_command(
+ 'activatefeature',
+ {'licensekey': item}, None
+ )
+ self.changed = True
+ self.log('%s activated', activate_license_keys)
+ self.message += " License %s activated." % activate_license_keys
+ else:
+ self.message += " No license Changes."
+
+ def apply(self):
+ msg = None
+ modify = []
+
+ self.basic_checks()
+
+ self.system_data = self.get_system_info()
+ if self.systemname or self.ntpip or self.timezone or self.time:
+ self.system_update(self.system_data)
+
+ # DNS configuration
+ self.dns_configure()
+
+ # For honour based licenses
+ modify = self.license_probe()
+ if modify or self.encryption:
+ self.license_update(modify)
+
+ # For key based licenses
+ if self.license_key:
+ self.license_key_update()
+
+ if self.changed:
+ if self.module.check_mode:
+ msg = "skipping changes due to check mode."
+ else:
+ msg = self.message
+ else:
+ msg = "No modifications required. Exiting with no changes."
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCInitialSetup()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_callhome.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_callhome.py
new file mode 100644
index 000000000..dd537b490
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_callhome.py
@@ -0,0 +1,890 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_callhome
+short_description: This module manages Call Home feature configuration on IBM Storage Virtualize
+ family systems
+description:
+ - Ansible interface to manage cloud and email Call Home feature.
+version_added: "1.7.0"
+options:
+ state:
+ description:
+ - Enables or updates (C(enabled)) or disables (C(disabled)) the Call Home feature.
+ choices: [ enabled, disabled ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ callhome_type:
+ description:
+ - Specifies the transmission type.
+ choices: [ 'cloud services', 'email', 'both' ]
+ required: True
+ type: str
+ proxy_type:
+ description:
+ - Specifies the proxy type.
+ - Required when I(state=enabled), to create or modify Call Home feature.
+ - Proxy gets deleted for I(proxy_type=no_proxy).
+ - The parameter is mandatory when I(callhome_type='cloud services') or I(callhome_type='both').
+ choices: [ open_proxy, basic_authentication, certificate, no_proxy ]
+ type: str
+ proxy_url:
+ description:
+ - Specifies the proxy server URL with a protocol prefix in fully qualified domain name format.
+ - Applies when I(state=enabled) and I(proxy_type=open_proxy) or I(proxy_type=basic_authentication).
+ type: str
+ proxy_port:
+ description:
+ - Specifies the proxy server port number.
+ - The value must be in the range 1 - 65535.
+ - Applies when I(state=enabled) and I(proxy_type=open_proxy) or I(proxy_type=basic_authentication).
+ type: int
+ proxy_username:
+ description:
+ - Specifies the proxy's username.
+ - Applies when I(state=enabled) and I(proxy_type=basic_authentication).
+ type: str
+ proxy_password:
+ description:
+ - Specifies the proxy's password.
+ - Applies when I(state=enabled) and I(proxy_type=basic_authentication).
+ type: str
+ sslcert:
+ description:
+ - Specifies the file path of proxy's certificate.
+ - Applies when I(state=enabled) and I(proxy_type=certificate).
+ type: str
+ company_name:
+ description:
+ - Specifies the user's organization as it should appear in Call Home email.
+ - Required when I(state=enabled).
+ type: str
+ address:
+ description:
+ - Specifies the first line of the user's address as it should appear in Call Home email.
+ - Required when I(state=enabled).
+ type: str
+ city:
+ description:
+ - Specifies the user's city as it should appear in Call Home email.
+ - Required when I(state=enabled).
+ type: str
+ province:
+ description:
+ - Specifies the user's state or province as it should appear in Call Home email.
+ - Required when I(state=enabled).
+ type: str
+ postalcode:
+ description:
+ - Specifies the user's zip code or postal code as it should appear in Call Home email.
+ - Required when I(state=enabled).
+ type: str
+ country:
+ description:
+ - Specifies the country in which the machine resides as it should appear in Call Home email.
+ - Required when I(state=enabled).
+ type: str
+ location:
+ description:
+ - Specifies the physical location of the system that has reported the error.
+ - Required when I(state=enabled).
+ type: str
+ contact_name:
+ description:
+ - Specifies the name of the person receiving the email.
+ - Required when I(state=enabled).
+ type: str
+ contact_email:
+ description:
+ - Specifies the email of the person.
+ - Required when I(state=enabled).
+ type: str
+ phonenumber_primary:
+ description:
+ - Specifies the primary contact telephone number.
+ - Required when I(state=enabled).
+ type: str
+ phonenumber_secondary:
+ description:
+ - Specifies the secondary contact telephone number.
+ - Required when I(state=enabled).
+ type: str
+ serverIP:
+ description:
+ - Specifies the IP address of the email server.
+ - Required when I(state=enabled) and I(callhome_type=email) or I(callhome_type=both).
+ type: str
+ serverPort:
+ description:
+ - Specifies the port number of the email server.
+ - The value must be in the range 1 - 65535.
+ - Required when I(state=enabled) and I(callhome_type=email) or I(callhome_type=both).
+ type: int
+ inventory:
+ description:
+ - Specifies whether the recipient mentioned in parameter I(contact_email) receives inventory email notifications.
+ - Applies when I(state=enabled).
+ - If unspecified, default value 'off' will be used.
+ choices: ['on', 'off']
+ type: str
+ invemailinterval:
+ description:
+ - Specifies the interval at which inventory emails are sent to the configured email recipients.
+ - The interval is measured in days. The value must be in the range 0 - 15.
+ - Setting the value to '0' turns off the inventory email notification function.
+ - Valid if I(inventory) is set to 'on'.
+ type: int
+ enhancedcallhome:
+ description:
+ - Specifies that the Call Home function is to send enhanced reports to the support center.
+ - Applies when I(state=enabled).
+ - If unspecified, default value 'off' will be used.
+ choices: ['on', 'off']
+ type: str
+ censorcallhome:
+ description:
+ - Specifies that sensitive data is deleted from the enhanced Call Home data.
+ - Applies when I(state=enabled).
+ - If unspecified, default value 'off' will be used.
+ choices: ['on', 'off']
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar (@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Configure callhome with both email and cloud
+ ibm.storage_virtualize.ibm_svc_manage_callhome:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "/tmp/playbook.debug"
+ state: "enabled"
+ callhome_type: "both"
+ address: "{{ address }}"
+ city: "{{ city }}"
+ company_name: "{{ company_name }}"
+ contact_email: "{{ contact_email }}"
+ contact_name: "{{ contact_name }}"
+ country: "{{ country }}"
+ location: "{{ location }}"
+ phonenumber_primary: "{{ primary_phonenumber }}"
+ postalcode: "{{ postal_code }}"
+ province: "{{ province }}"
+ proxy_type: "{{ proxy_type }}"
+ proxy_url: "{{ proxy_url }}"
+ proxy_port: "{{ proxy_port }}"
+ serverIP: "{{ server_ip }}"
+ serverPort: "{{ server_port }}"
+ inventory: "on"
+ invemailinterval: 1
+ enhancedcallhome: "on"
+ censorcallhome: "on"
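+# A hypothetical additional example (not part of the original documentation):
+# disable Call Home for both transmission types.
+- name: Disable callhome for both email and cloud
+ ibm.storage_virtualize.ibm_svc_manage_callhome:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "/tmp/playbook.debug"
+ state: "disabled"
+ callhome_type: "both"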
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+import time
+
+
+class IBMSVCCallhome(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(type='str', required=True, choices=['enabled', 'disabled']),
+ callhome_type=dict(type='str', required=True, choices=['cloud services', 'email', 'both']),
+ proxy_type=dict(type='str', choices=['open_proxy', 'basic_authentication', 'certificate', 'no_proxy']),
+ proxy_url=dict(type='str'),
+ proxy_port=dict(type='int'),
+ proxy_username=dict(type='str'),
+ proxy_password=dict(type='str', no_log=True),
+ sslcert=dict(type='str'),
+ company_name=dict(type='str'),
+ address=dict(type='str'),
+ city=dict(type='str'),
+ province=dict(type='str'),
+ postalcode=dict(type='str'),
+ country=dict(type='str'),
+ location=dict(type='str'),
+ contact_name=dict(type='str'),
+ contact_email=dict(type='str'),
+ phonenumber_primary=dict(type='str'),
+ phonenumber_secondary=dict(type='str'),
+ serverIP=dict(type='str'),
+ serverPort=dict(type='int'),
+ inventory=dict(type='str', choices=['on', 'off']),
+ invemailinterval=dict(type='int'),
+ enhancedcallhome=dict(type='str', choices=['on', 'off']),
+ censorcallhome=dict(type='str', choices=['on', 'off'])
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.state = self.module.params['state']
+ self.callhome_type = self.module.params['callhome_type']
+ self.company_name = self.module.params['company_name']
+ self.address = self.module.params['address']
+ self.city = self.module.params['city']
+ self.province = self.module.params['province']
+ self.postalcode = self.module.params['postalcode']
+ self.country = self.module.params['country']
+ self.location = self.module.params['location']
+ self.contact_name = self.module.params['contact_name']
+ self.contact_email = self.module.params['contact_email']
+ self.phonenumber_primary = self.module.params['phonenumber_primary']
+
+ # Optional
+ self.proxy_type = self.module.params.get('proxy_type', False)
+ self.proxy_url = self.module.params.get('proxy_url', False)
+ self.proxy_port = self.module.params.get('proxy_port', False)
+ self.proxy_username = self.module.params.get('proxy_username', False)
+ self.proxy_password = self.module.params.get('proxy_password', False)
+ self.sslcert = self.module.params.get('sslcert', False)
+ self.phonenumber_secondary = self.module.params.get('phonenumber_secondary', False)
+ self.serverIP = self.module.params.get('serverIP', False)
+ self.serverPort = self.module.params.get('serverPort', False)
+ self.inventory = self.module.params.get('inventory', False)
+ self.invemailinterval = self.module.params.get('invemailinterval', False)
+ self.enhancedcallhome = self.module.params.get('enhancedcallhome', False)
+ self.censorcallhome = self.module.params.get('censorcallhome', False)
+
+ # creating an instance of IBMSVCRestApi
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ # setting the default value if unspecified
+ if not self.inventory:
+ self.inventory = 'off'
+ if not self.enhancedcallhome:
+ self.enhancedcallhome = 'off'
+ if not self.censorcallhome:
+ self.censorcallhome = 'off'
+ # perform some basic handling for few parameters
+ if self.inventory == 'on':
+ if not self.invemailinterval:
+ self.module.fail_json(msg="Parameter [invemailinterval] should be configured to use [inventory]")
+ if self.invemailinterval:
+ if self.inventory == 'off':
+ self.module.fail_json(msg="The parameter [inventory] should be configured with 'on' while setting [invemailinterval]")
+ if self.invemailinterval not in range(1, 16):
+ self.module.fail_json(msg="Parameter [invemailinterval] supported range is 0 to 15")
+ if isinstance(self.serverPort, int):
+ if self.serverPort not in range(1, 65536):
+ self.module.fail_json(msg="Parameter [serverPort] must be in range[1-65535]")
+ if isinstance(self.proxy_port, int):
+ if self.proxy_port not in range(1, 65536):
+ self.module.fail_json(msg="Parameter [proxy_port] must be in range[1-65535]")
+ if not self.state:
+ self.module.fail_json(msg="Missing mandatory parameter: state")
+ if not self.callhome_type:
+ self.module.fail_json(msg="Missing mandatory parameter: callhome_type")
+ if (self.callhome_type in ['email', 'both']) and (not self.serverIP or not self.serverPort) and (self.state == 'enabled'):
+ self.module.fail_json(msg="Parameters: serverIP, serverPort are required when callhome_type is email/both")
+ if self.state == "enabled" and self.proxy_type in ["cloud services", "both"] and self.proxy_type:
+ if self.proxy_type == 'open_proxy' and (not self.proxy_url or not self.proxy_port):
+ self.module.fail_json(msg="Parameters [proxy_url, proxy_port] required when proxy_type=open_proxy")
+ if self.proxy_type == 'basic_authentication' and (not self.proxy_url or not self.proxy_port or not self.proxy_username or not self.proxy_password):
+ self.module.fail_json(msg="Parameters [proxy_url, proxy_port, proxy_username, proxy_password] required when proxy_type=basic_authentication")
+ if self.proxy_type == 'certificate' and (not self.proxy_url or not self.proxy_port or not self.sslcert):
+ self.module.fail_json(msg="Parameters [proxy_url, proxy_port, sslcert] required when proxy_type=certificate")
+ if self.state == 'enabled':
+ parameters = {
+ 'callhome_type': self.callhome_type,
+ 'company_name': self.company_name,
+ 'address': self.address,
+ 'city': self.city,
+ 'province': self.province,
+ 'country': self.country,
+ 'location': self.location,
+ 'contact_name': self.contact_name,
+ 'contact_email': self.contact_email,
+ 'phonenumber_primary': self.phonenumber_primary,
+ }
+ parameter_not_provided = []
+ for parameter in parameters:
+ if not parameters[parameter]:
+ parameter_not_provided.append(parameter)
+ if parameter_not_provided:
+ self.module.fail_json(msg="Parameters {0} are required when state is 'enabled'".format(parameter_not_provided))
+
+ # function to fetch lssystem data
+ def get_system_data(self):
+ return self.restapi.svc_obj_info('lssystem', cmdopts=None, cmdargs=None)
+
+ # function to probe lssystem data
+ def probe_system(self, data):
+ modify = {}
+ if self.invemailinterval:
+ if self.invemailinterval != data['inventory_mail_interval']:
+ modify['invemailinterval'] = self.invemailinterval
+ if self.enhancedcallhome:
+ if self.enhancedcallhome != data['enhanced_callhome']:
+ modify['enhancedcallhome'] = self.enhancedcallhome
+ if self.censorcallhome:
+ if self.censorcallhome != data['censor_callhome']:
+ modify['censorcallhome'] = self.censorcallhome
+ return modify
+
+ # function to execute chsystem commands
+ def update_system(self, modify):
+ command = 'chsystem'
+ command_options = modify
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log("Chsystem commands executed.")
+
+ # function to fetch existing email user
+ def get_existing_email_user_data(self):
+ data = {}
+ email_data = self.restapi.svc_obj_info(cmd='lsemailuser', cmdopts=None, cmdargs=None)
+ for item in email_data:
+ if item['address'] == self.contact_email:
+ data = item
+ return data
+
+ # function to check if email server exists or not
+ def check_email_server_exists(self):
+ status = False
+ data = self.restapi.svc_obj_info(cmd='lsemailserver', cmdopts=None, cmdargs=None)
+ for item in data:
+ if item['IP_address'] == self.serverIP and int(item['port']) == self.serverPort:
+ status = True
+ break
+ return status
+
+ # function to check if email user exists or not
+ def check_email_user_exists(self):
+ temp = {}
+ data = self.restapi.svc_obj_info(cmd='lsemailuser', cmdopts=None, cmdargs=None)
+ for item in data:
+ if item['address'] == self.contact_email:
+ temp = item
+ break
+ return temp
+
+ # function to create an email server
+ def create_email_server(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("Creating email server '%s:%s'.", self.serverIP, self.serverPort)
+ command = 'mkemailserver'
+ command_options = {
+ 'ip': self.serverIP,
+ 'port': self.serverPort,
+ }
+ cmdargs = None
+ result = self.restapi.svc_run_command(command, command_options, cmdargs)
+ if 'message' in result:
+ self.changed = True
+ self.log("create email server result message '%s'", (result['message']))
+ else:
+ self.module.fail_json(
+ msg="Failed to create email server [%s:%s]" % (self.serverIP, self.serverPort)
+ )
+
+ # function to update email user
+ def update_email_user(self, data, id):
+ command = "chemailuser"
+ command_options = data
+ cmdargs = [id]
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log('Email user updated successfully.')
+
+ # function to manage support email user
+ def manage_support_email_user(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ support_email = {}
+ selected_email_id = ''
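+ # time.timezone is seconds west of UTC, so t is the UTC offset in
+ # hours (for example, -5 for US Eastern); it only selects which
+ # support address receives the Call Home emails.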
+ t = -1 * ((time.timezone / 60) / 60)
+ if t >= -8 and t <= -4:
+ # for US timezone, callhome0@de.ibm.com is used
+ selected_email_id = 'callhome0@de.ibm.com'
+ else:
+ # for ROW, callhome1@de.ibm.com is used
+ selected_email_id = 'callhome1@de.ibm.com'
+ existing_user = self.restapi.svc_obj_info('lsemailuser', cmdopts=None, cmdargs=None)
+ if existing_user:
+ for user in existing_user:
+ if user['user_type'] == 'support':
+ support_email = user
+ if not support_email:
+ self.log("Creating support email user '%s'.", selected_email_id)
+ command = 'mkemailuser'
+ command_options = {
+ 'address': selected_email_id,
+ 'usertype': 'support',
+ 'info': 'off',
+ 'warning': 'off',
+ }
+ if self.inventory:
+ command_options['inventory'] = self.inventory
+ cmdargs = None
+ result = self.restapi.svc_run_command(command, command_options, cmdargs)
+ if 'message' in result:
+ self.changed = True
+ self.log("create support email user result message '%s'", (result['message']))
+ else:
+ self.module.fail_json(
+ msg="Failed to support create email user [%s]" % (self.contact_email)
+ )
+ else:
+ modify = {}
+ if support_email['address'] != selected_email_id:
+ modify['address'] = selected_email_id
+ if self.inventory:
+ if support_email['inventory'] != self.inventory:
+ modify['inventory'] = self.inventory
+ if modify:
+ self.restapi.svc_run_command(
+ 'chemailuser',
+ modify,
+ [support_email['id']]
+ )
+ self.log("Updated support user successfully.")
+
+ # function to create an email user
+ def create_email_user(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("Creating email user '%s'.", self.contact_email)
+ command = 'mkemailuser'
+ command_options = {
+ 'address': self.contact_email,
+ 'usertype': 'local',
+ }
+ if self.inventory:
+ command_options['inventory'] = self.inventory
+ cmdargs = None
+ result = self.restapi.svc_run_command(command, command_options, cmdargs)
+ if 'message' in result:
+ self.changed = True
+ self.log("Create email user result message '%s'.", (result['message']))
+ else:
+ self.module.fail_json(
+ msg="Failed to create email user [%s]" % (self.contact_email)
+ )
+
+ # function to enable email callhome
+ def enable_email_callhome(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = "startemail"
+ command_options = {}
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log("Email callhome enabled.")
+
+ # function to disable email callhome
+ def disable_email_callhome(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = "stopemail"
+ command_options = {}
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log("Email callhome disabled.")
+
+ # function to update email data
+ def update_email_data(self):
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = "chemail"
+ command_options = {}
+ if self.contact_email:
+ command_options['reply'] = self.contact_email
+ if self.contact_name:
+ command_options['contact'] = self.contact_name
+ if self.phonenumber_primary:
+ command_options['primary'] = self.phonenumber_primary
+ if self.phonenumber_secondary:
+ command_options['alternate'] = self.phonenumber_secondary
+ if self.location:
+ command_options['location'] = self.location
+ if self.company_name:
+ command_options['organization'] = self.company_name
+ if self.address:
+ command_options['address'] = self.address
+ if self.city:
+ command_options['city'] = self.city
+ if self.province:
+ command_options['state'] = self.province
+ if self.postalcode:
+ command_options['zip'] = self.postalcode
+ if self.country:
+ command_options['country'] = self.country
+ cmdargs = None
+ if command_options:
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log("Email data successfully updated.")
+
+ # function for checking if proxy server exists
+ def get_existing_proxy(self):
+ data = {}
+ data = self.restapi.svc_obj_info(cmd='lsproxy', cmdopts=None, cmdargs=None)
+ return data
+
+ # function for removing a proxy
+ def remove_proxy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'rmproxy'
+ command_options = None
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log('Proxy removed successfully.')
+
+ # function for creating a proxy
+ def create_proxy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'mkproxy'
+ command_options = {}
+ if self.proxy_type == 'open_proxy':
+ if self.proxy_url:
+ command_options['url'] = self.proxy_url
+ if self.proxy_port:
+ command_options['port'] = self.proxy_port
+ elif self.proxy_type == 'basic_authentication':
+ if self.proxy_url:
+ command_options['url'] = self.proxy_url
+ if self.proxy_port:
+ command_options['port'] = self.proxy_port
+ if self.proxy_username:
+ command_options['username'] = self.proxy_username
+ if self.proxy_password:
+ command_options['password'] = self.proxy_password
+ elif self.proxy_type == 'certificate':
+ if self.proxy_url:
+ command_options['url'] = self.proxy_url
+ if self.proxy_port:
+ command_options['port'] = self.proxy_port
+ if self.sslcert:
+ command_options['sslcert'] = self.sslcert
+
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log("Proxy created successfully.")
+
+ # function for probing existing proxy data
+ def probe_proxy(self, data):
+ modify = {}
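+ # password and sslcert cannot be read back from 'lsproxy', so when
+ # supplied they are always re-applied; the module may report a change
+ # even if the stored value is identical.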
+ if self.proxy_type == 'open_proxy':
+ if self.proxy_url:
+ if self.proxy_url != data['url']:
+ modify['url'] = self.proxy_url
+ if self.proxy_port:
+ if int(self.proxy_port) != int(data['port']):
+ modify['port'] = self.proxy_port
+ elif self.proxy_type == 'basic_authentication':
+ if self.proxy_url:
+ if self.proxy_url != data['url']:
+ modify['url'] = self.proxy_url
+ if self.proxy_port:
+ if self.proxy_port != int(data['port']):
+ modify['port'] = self.proxy_port
+ if self.proxy_username:
+ if self.proxy_username != data['username']:
+ modify['username'] = self.proxy_username
+ if self.proxy_password:
+ modify['password'] = self.proxy_password
+ elif self.proxy_type == 'certificate':
+ if self.proxy_url:
+ if self.proxy_url != data['url']:
+ modify['url'] = self.proxy_url
+ if self.proxy_port:
+ if self.proxy_port != int(data['port']):
+ modify['port'] = self.proxy_port
+ if self.sslcert:
+ modify['sslcert'] = self.sslcert
+ return modify
+
+ # function for updating a proxy
+ def update_proxy(self, data):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'chproxy'
+ command_options = data
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log('Proxy updated successfully.')
+
+ # function for fetching existing cloud callhome data
+ def get_existing_cloud_callhome_data(self):
+ data = {}
+ command = 'lscloudcallhome'
+ command_options = None
+ cmdargs = None
+ data = self.restapi.svc_obj_info(command, command_options, cmdargs)
+ return data
+
+ # function for enabling cloud callhome
+ def enable_cloud_callhome(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'chcloudcallhome'
+ command_options = {
+ 'enable': True
+ }
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.changed = True
+ self.log('Cloud callhome enabled.')
+
+ # function for doing connection test for cloud callhome
+ def test_connection_cloud_callhome(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'sendcloudcallhome'
+ command_options = {
+ 'connectiontest': True
+ }
+ self.restapi.svc_run_command(command, command_options, None)
+ self.changed = True
+ self.log('Cloud callhome connection tested.')
+ # the connection testing can take some time to complete.
+ time.sleep(3)
+
+ # function for managing proxy server
+ def manage_proxy_server(self):
+ proxy_data = self.get_existing_proxy()
+ if proxy_data['enabled'] == 'no':
+ if self.proxy_type == 'no_proxy':
+ self.log('Proxy already disabled.')
+ else:
+ self.create_proxy()
+ self.changed = True
+ elif proxy_data['enabled'] == 'yes':
+ if self.proxy_type == 'no_proxy':
+ self.remove_proxy()
+ self.changed = True
+ else:
+ modify = self.probe_proxy(proxy_data)
+ if modify:
+ self.update_proxy(modify)
+ self.changed = True
+
+ # function for disabling cloud callhome
+ def disable_cloud_callhome(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'chcloudcallhome'
+ command_options = {
+ 'disable': True
+ }
+ cmdargs = None
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.log('Cloud callhome disabled.')
+
+ # function to initiate callhome with cloud
+ def initiate_cloud_callhome(self):
+ msg = ''
+ attempts = 0
+ limit_reached = False
+ active_status = False
+ # manage proxy server
+ self.manage_proxy_server()
+ # update email data
+ self.update_email_data()
+ # manage cloud callhome
+ lsdata = self.get_existing_cloud_callhome_data()
+ if lsdata['status'] == 'enabled':
+ # perform connection test
+ self.test_connection_cloud_callhome()
+ else:
+ self.enable_cloud_callhome()
+ # cloud callhome takes some time to get enabled.
+ while not active_status:
+ attempts += 1
+ if attempts > 10:
+ limit_reached = True
+ break
+ lsdata = self.get_existing_cloud_callhome_data()
+ if lsdata['status'] == 'enabled':
+ active_status = True
+ time.sleep(2)
+ if limit_reached:
+ # the module will exit without performing connection test.
+ msg = "Callhome with Cloud is enabled. Please check connection to proxy."
+ self.changed = True
+ return msg
+ if active_status:
+ # perform connection test
+ self.test_connection_cloud_callhome()
+ msg = "Callhome with Cloud enabled successfully."
+ self.changed = True
+ return msg
+
+ # function to initiate callhome with email notifications
+ def initiate_email_callhome(self):
+ msg = ''
+ # manage email server
+ email_server_exists = self.check_email_server_exists()
+ if email_server_exists:
+ self.log("Email server already exists.")
+ else:
+ self.create_email_server()
+ self.changed = True
+ # manage support email user
+ self.manage_support_email_user()
+ # manage local email user
+ email_user_exists = self.check_email_user_exists()
+ if email_user_exists:
+ email_user_modify = {}
+ if email_user_exists['inventory'] != self.inventory:
+ email_user_modify['inventory'] = self.inventory
+ if email_user_modify:
+ self.update_email_user(email_user_modify, email_user_exists['id'])
+ else:
+ self.create_email_user()
+ # manage email data
+ self.update_email_data()
+ # enable email callhome
+ self.enable_email_callhome()
+ msg = "Callhome with email enabled successfully."
+ self.changed = True
+ return msg
+
+ def apply(self):
+ self.changed = False
+ msg = None
+ self.basic_checks()
+ if self.state == 'enabled':
+ # enable cloud callhome
+ if self.callhome_type == 'cloud services':
+ msg = self.initiate_cloud_callhome()
+ # enable email callhome
+ elif self.callhome_type == 'email':
+ msg = self.initiate_email_callhome()
+ # enable both cloud and email callhome
+ elif self.callhome_type == 'both':
+ temp_msg = ''
+ temp_msg += self.initiate_cloud_callhome()
+ temp_msg += ' ' + self.initiate_email_callhome()
+ if temp_msg:
+ msg = temp_msg
+ # manage chsystem parameters
+ system_data = self.get_system_data()
+ system_modify = self.probe_system(system_data)
+ if system_modify:
+ self.update_system(system_modify)
+ elif self.state == 'disabled':
+ if self.callhome_type == 'cloud services':
+ cloud_callhome_data = self.get_existing_cloud_callhome_data()
+ if cloud_callhome_data['status'] == 'disabled':
+ msg = "Callhome with cloud already disabled."
+ elif cloud_callhome_data['status'] == 'enabled':
+ self.disable_cloud_callhome()
+ msg = "Callhome with cloud disabled successfully."
+ self.changed = True
+ elif self.callhome_type == 'email':
+ self.disable_email_callhome()
+ msg = "Callhome with email disabled successfully."
+ self.changed = True
+ elif self.callhome_type == 'both':
+ # disable email callhome
+ self.disable_email_callhome()
+ msg = "Callhome with email disabled successfully."
+ self.changed = True
+ # disable cloud callhome
+ cloud_callhome_data = self.get_existing_cloud_callhome_data()
+ if cloud_callhome_data['status'] == 'disabled':
+ msg += " Callhome with cloud already disabled."
+ elif cloud_callhome_data['status'] == 'enabled':
+ self.disable_cloud_callhome()
+ msg += " Callhome with cloud disabled successfully."
+ self.changed = True
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCCallhome()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py
new file mode 100644
index 000000000..6563af66b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_consistgrp_flashcopy.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_consistgrp_flashcopy
+short_description: This module manages FlashCopy consistency groups on IBM Storage Virtualize
+ family systems
+description:
+ - Ansible interface to manage 'mkfcconsistgrp' and 'rmfcconsistgrp' commands.
+version_added: "1.4.0"
+options:
+ name:
+ description:
+ - Specifies the name of the FlashCopy consistency group.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) a FlashCopy consistency group.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ version_added: '1.5.0'
+ ownershipgroup:
+ description:
+ - Specifies the name of the ownership group.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ - Valid when I(state=present), to create or modify a FlashCopy consistency group.
+ required: false
+ type: str
+ noownershipgroup:
+ description:
+ - If specified True, the consistency group is removed from all associated ownership groups.
+ - Parameters I(noownershipgroup) and I(ownershipgroup) are mutually exclusive.
+ - Valid when I(state=present), to modify a FlashCopy consistency group.
+ required: false
+ type: bool
+ force:
+ description:
+ - If specified True, removes all the associated FlashCopy mappings while deleting the FlashCopy consistency group.
+ - Valid when I(state=absent), to delete a FlashCopy consistency group.
+ required: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sreshtant Bohidar (@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a FlashCopy consistency group
+ ibm.storage_virtualize.ibm_svc_manage_consistgrp_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: consistgroup-name
+ state: present
+ ownershipgroup: ownershipgroup-name
+- name: Delete a FlashCopy consistency group
+ ibm.storage_virtualize.ibm_svc_manage_consistgrp_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: consistgroup-name
+ state: absent
+ force: true
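+# A hypothetical additional example (not part of the original documentation):
+# remove the group from all ownership groups using noownershipgroup.
+- name: Detach a FlashCopy consistency group from its ownership group
+ ibm.storage_virtualize.ibm_svc_manage_consistgrp_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: consistgroup-name
+ state: present
+ noownershipgroup: true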
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCFlashcopyConsistgrp(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ ownershipgroup=dict(type='str', required=False),
+ noownershipgroup=dict(type='bool', required=False),
+ force=dict(type='bool', required=False),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.ownershipgroup = self.module.params.get('ownershipgroup', False)
+ self.noownershipgroup = self.module.params.get('noownershipgroup', False)
+ self.force = self.module.params.get('force', False)
+
+ # Handling missing mandatory parameters name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_fcconsistgrp(self):
+ data = {}
+ data = self.restapi.svc_obj_info(cmd='lsfcconsistgrp', cmdopts=None,
+ cmdargs=[self.name])
+ return data
+
+ def fcconsistgrp_create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkfcconsistgrp'
+ cmdopts = {}
+ cmdopts['name'] = self.name
+ if self.ownershipgroup:
+ cmdopts['ownershipgroup'] = self.ownershipgroup
+
+ self.log("Creating fc consistgrp.. Command: %s opts %s", cmd, cmdopts)
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ if 'message' in result:
+ self.changed = True
+ self.log("Create fc consistgrp message %s", result['message'])
+ else:
+ self.module.fail_json(msg="Failed to create fc consistgrp [%s]" % self.name)
+
+ def fcconsistgrp_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmfcconsistgrp'
+ cmdopts = {}
+ if self.force:
+ cmdopts['force'] = self.force
+
+ self.log("Deleting fc consistgrp.. Command %s opts %s", cmd, cmdopts)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ def fcconsistgrp_probe(self, data):
+ props = {}
+ self.log("Probe which properties need to be updated...")
+ if not self.noownershipgroup:
+ if self.ownershipgroup and self.ownershipgroup != data["owner_name"]:
+ props["ownershipgroup"] = self.ownershipgroup
+ if self.noownershipgroup and data["owner_name"]:
+ props['noownershipgroup'] = self.noownershipgroup
+ return props
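+
+ # Editor's sketch (illustrative, data values hypothetical): given
+ # lsfcconsistgrp output such as {'name': 'cg0', 'owner_name': 'grp1'},
+ # the probe above returns:
+ # {'ownershipgroup': 'grp2'} when called with ownershipgroup='grp2'
+ # {'noownershipgroup': True} when called with noownershipgroup=True
+ # {} when ownership already matches, so apply() makes no change.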
+
+ def fcconsistgrp_update(self, modify):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if modify:
+ self.log("updating fcmap with properties %s", modify)
+ cmd = 'chfcconsistgrp'
+ cmdopts = {}
+ for prop in modify:
+ cmdopts[prop] = modify[prop]
+
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = []
+ gdata = self.get_existing_fcconsistgrp()
+ if gdata:
+ if self.state == "absent":
+ self.log("fc consistgrp [%s] exist, but requested state is 'absent'", self.name)
+ changed = True
+ elif self.state == "present":
+ modify = self.fcconsistgrp_probe(gdata)
+ if modify:
+ changed = True
+ else:
+ if self.state == "present":
+ self.log("fc consistgrp [%s] doesn't exist, but requested state is 'present'", self.name)
+ changed = True
+ if changed:
+ if self.state == "absent":
+ self.fcconsistgrp_delete()
+ msg = "fc consistgrp [%s] has been deleted" % self.name
+ elif self.state == "present" and modify:
+ self.fcconsistgrp_update(modify)
+ msg = "fc consistgrp [%s] has been modified" % self.name
+ elif self.state == "present" and not modify:
+ self.fcconsistgrp_create()
+ msg = "fc consistgrp [%s] has been created" % self.name
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+ else:
+ if self.state == "absent":
+ msg = "fc consistgrp [%s] does not exist" % self.name
+ elif self.state == "present":
+ msg = "fc consistgrp [%s] already exists" % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCFlashcopyConsistgrp()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_cv.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_cv.py
new file mode 100644
index 000000000..d8a9f199e
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_cv.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_cv
+short_description: This module manages the change volume for a given volume on IBM
+ Storage Virtualize family systems
+description:
+ - Ansible interface to manage the change volume in remote copy replication on IBM Storage Virtualize family systems.
+version_added: "1.3.0"
+options:
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a change volume.
+ choices: [absent, present]
+ required: true
+ type: str
+ rname:
+ description:
+ - Specifies the name of the remote copy relationship.
+ required: true
+ type: str
+ cvname:
+ description:
+ - Specifies the name to assign to the master or auxiliary change volume.
+ required: true
+ type: str
+ basevolume:
+ description:
+ - Specifies the base volume name (master or auxiliary).
+ - Required when I(state=present), to create the change volume.
+ type: str
+ ismaster:
+ description:
+ - Specifies whether the change volume is being associated with or disassociated from the master cluster.
+ - Required when the change volume is being associated or disassociated from the master cluster.
+ type: bool
+ default: true
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ validate_certs:
+ description:
+ - Validates the SSL certificate.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Shilpi Jain(@Shilpi-Jain1)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create master change volume and associate with rcopy
+ ibm.storage_virtualize.ibm_svc_manage_cv:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ rname: sample_rcopy
+ cvname: vol1_cv
+ basevolume: vol1
+- name: Create auxiliary change volume and associate with rcopy
+ ibm.storage_virtualize.ibm_svc_manage_cv:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ rname: sample_rcopy
+ cvname: vol2_aux_cv
+ basevolume: vol2
+ ismaster: false
+- name: Delete master change volume and disassociate from rcopy
+ ibm.storage_virtualize.ibm_svc_manage_cv:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: absent
+ rname: sample_rcopy
+ cvname: vol1_cv
+- name: Delete auxiliary change volume and disassociate from rcopy
+ ibm.storage_virtualize.ibm_svc_manage_cv:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: absent
+ rname: sample_rcopy
+ cvname: vol2_aux_cv
+ ismaster: false
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCchangevolume(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ state=dict(type='str',
+ required=True,
+ choices=['present', 'absent']),
+ rname=dict(type='str', required=True),
+ cvname=dict(type='str', required=True),
+ basevolume=dict(type='str'),
+ ismaster=dict(type='bool', default=True)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.state = self.module.params['state']
+ self.rname = self.module.params['rname']
+ self.cvname = self.module.params['cvname']
+
+ # Optional
+ self.basevolume = self.module.params['basevolume']
+ self.ismaster = self.module.params['ismaster']
+
+ # Handling missing mandatory parameter rname
+ if not self.rname:
+ self.module.fail_json(msg='Missing mandatory parameter: rname')
+ # Handling missing mandatory parameter cvname
+ if not self.cvname:
+ self.module.fail_json(msg='Missing mandatory parameter: cvname')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_rc(self):
+ """
+ Find the remote copy relationships, such as Metro Mirror or Global
+ Mirror relationships, visible to the system.
+
+ Returns:
+ None if there are no matching instances, or a list including all
+ the matching instances.
+ """
+ self.log('Trying to get the remote copy relationship %s', self.rname)
+ data = self.restapi.svc_obj_info(cmd='lsrcrelationship',
+ cmdopts=None, cmdargs=[self.rname])
+
+ return data
+
+ def get_existing_vdisk(self, volname):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts={'bytes': True},
+ cmdargs=[volname])
+
+ if not data:
+ self.log("source volume %s does not exist", volname)
+ return
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def change_volume_attach(self, rcrelationship_data):
+ cmdopts = {}
+
+ if rcrelationship_data['copy_type'] != 'global':
+ self.module.fail_json(msg="Relationship '%s' type must be global" % self.rname)
+
+ if self.ismaster:
+ cmdopts['masterchange'] = self.cvname
+ else:
+ cmdopts['auxchange'] = self.cvname
+
+ # command
+ cmd = 'chrcrelationship'
+ cmdargs = [self.rname]
+ self.log("updating chrcrelationship %s with properties %s", cmd, cmdopts)
+
+ # Run command
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ self.changed = True
+ self.log("Updated remote copy relationship ")
+
+ def change_volume_detach(self, rcrelationship_data):
+ cmdopts = {}
+
+ if self.ismaster:
+ cmdopts = {'nomasterchange': True}
+ else:
+ cmdopts = {'noauxchange': True}
+
+ # command
+ cmd = 'chrcrelationship'
+ cmdargs = [self.rname]
+ self.log("updating chrcrelationship %s with properties %s", cmd, cmdopts)
+
+ # Run command
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ self.changed = True
+ self.log("Updated remote copy relationship ")
+
+ def change_volume_probe(self):
+ is_update_required = False
+
+ rcrelationship_data = self.get_existing_rc()
+ if not rcrelationship_data:
+ self.module.fail_json(msg="Relationship '%s' does not exists, relationship must exists before calling this module" % self.rname)
+
+ if self.ismaster:
+ if self.cvname == rcrelationship_data['master_change_vdisk_name']:
+ self.log("Master change volume %s is already attached to the relationship", self.cvname)
+ elif rcrelationship_data['master_change_vdisk_name'] != '':
+ self.module.fail_json(msg="Master change volume %s is already attached to the relationship" % rcrelationship_data['master_change_vdisk_name'])
+ else:
+ is_update_required = True
+ else:
+ if self.cvname == rcrelationship_data['aux_change_vdisk_name']:
+ self.log("Aux change volume %s is already attached to the relationship", self.cvname)
+ elif rcrelationship_data['aux_change_vdisk_name'] != '':
+ self.module.fail_json(msg="Aux change volume %s is already attached to the relationship" % rcrelationship_data['aux_change_vdisk_name'])
+ else:
+ is_update_required = True
+
+ return is_update_required
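+
+ # Editor's sketch (illustrative, data values hypothetical): with
+ # lsrcrelationship output {'master_change_vdisk_name': '',
+ # 'aux_change_vdisk_name': 'vol2_aux_cv'}, probing with ismaster=True and
+ # cvname='vol1_cv' returns True (attach needed), while ismaster=False and
+ # cvname='vol2_aux_cv' returns False (the aux change volume is attached).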
+
+ def change_volume_delete(self):
+ # command
+ cmd = 'rmvolume'
+ cmdopts = None
+ cmdargs = [self.cvname]
+
+ # Run command
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ self.changed = True
+ self.log("Delete vdisk %s", self.cvname)
+
+ def change_volume_create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if not self.basevolume:
+ self.module.fail_json(msg="You must pass in name of the master or auxiliary volume.")
+
+ # lsvdisk <basevolume>
+ vdisk_data = self.get_existing_vdisk(self.basevolume)
+ if not vdisk_data:
+ self.module.fail_json(msg="%s volume does not exist, change volume not created" % self.basevolume)
+
+ # Make command
+ cmd = 'mkvdisk'
+ cmdopts = {}
+ cmdopts['name'] = self.cvname
+ cmdopts['mdiskgrp'] = vdisk_data['mdisk_grp_name']
+ cmdopts['size'] = vdisk_data['capacity']
+ cmdopts['unit'] = 'b'
+ cmdopts['rsize'] = '0%'
+ cmdopts['autoexpand'] = True
+ cmdopts['iogrp'] = vdisk_data['IO_group_name']
+ self.log("creating vdisk command %s opts %s", cmd, cmdopts)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("Create vdisk result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create vdisk [%s]" % self.cvname)
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = []
+
+ vdisk_data = self.get_existing_vdisk(self.cvname)
+
+ if vdisk_data:
+ if self.state == 'absent':
+ self.log(
+ "CHANGED: Change volume exists, requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ modify = self.change_volume_probe()
+ if modify:
+ changed = True
+ else:
+ self.log("No change detected")
+ else:
+ if self.state == 'present':
+ changed = True
+ self.log("CHANGED: Change volume does not exist, but requested state is '%s'", self.state)
+
+ if changed:
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+ else:
+ rcrelationship_data = self.get_existing_rc()
+ if not rcrelationship_data:
+ self.module.fail_json(msg="Relationship '%s' does not exists, relationship must exists before calling this module" % self.rname)
+ else:
+ if self.state == 'present' and modify:
+ self.change_volume_attach(rcrelationship_data)
+ msg = "Change volume %s configured to the remote copy relationship." % self.cvname
+ elif self.state == 'present':
+ self.change_volume_create()
+ self.change_volume_attach(rcrelationship_data)
+ msg = "vdisk %s has been created and configured to remote copy relationship." % self.cvname
+ elif self.state == 'absent':
+ self.change_volume_detach(rcrelationship_data)
+ self.change_volume_delete()
+ msg = "vdisk %s has been deleted and detached from remote copy relationship." % self.cvname
+ else:
+ self.log("Exiting with no changes")
+ if self.state in ['absent']:
+ msg = "Change volume [%s] does not exist." % self.cvname
+ else:
+ msg = "No Modifications detected, Change volume [%s] already configured." % self.cvname
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCchangevolume()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_flashcopy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_flashcopy.py
new file mode 100644
index 000000000..63894c1ea
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_flashcopy.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_flashcopy
+short_description: This module manages FlashCopy mappings on IBM Storage Virtualize
+ family systems
+description:
+ - Ansible interface to manage the 'mkfcmap', 'rmfcmap', and 'chfcmap' commands.
+ This module configures "clone", "snapshot" or "backup" type FlashCopy mappings.
+version_added: "1.4.0"
+options:
+ name:
+ description:
+ - Specifies the name of the FlashCopy mapping.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a FlashCopy mapping.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ copytype:
+ description:
+ - Specifies the copy type when creating the FlashCopy mapping.
+ - Required when I(state=present), to create a FlashCopy mapping.
+ choices: [ snapshot, clone, backup ]
+ type: str
+ source:
+ description:
+ - Specifies the name of the source volume.
+ - Required when I(state=present), to create a FlashCopy mapping.
+ type: str
+ target:
+ description:
+ - Specifies the name of the target volume.
+ - Required when I(state=present), to create a FlashCopy mapping.
+ type: str
+ mdiskgrp:
+ description:
+ - Specifies the name of the storage pool to use when creating the target volume.
+ - If unspecified, the pool associated with the source volume is used.
+ - Valid when I(state=present), to create a FlashCopy mapping.
+ type: str
+ consistgrp:
+ description:
+ - Specifies the name of the consistency group to which the FlashCopy mapping is to be added.
+ - Parameters I(consistgrp) and I(noconsistgrp) are mutually exclusive.
+ - Valid when I(state=present), to create or modify a FlashCopy mapping.
+ type: str
+ noconsistgrp:
+ description:
+ - If set to C(true), the FlashCopy mapping is removed from the consistency group.
+ - Parameters I(noconsistgrp) and I(consistgrp) are mutually exclusive.
+ - Valid when I(state=present), to modify a FlashCopy mapping.
+ type: bool
+ copyrate:
+ description:
+ - Specifies the copy rate. The rate ranges from 0 to 150.
+ - If unspecified, the default copy rate of 50 for clone and 0 for snapshot is used.
+ - Valid when I(state=present), to create or modify a FlashCopy mapping.
+ type: str
+ grainsize:
+ description:
+ - Specifies the grain size for the FlashCopy mapping.
+ - The grainsize can be set to 64 or 256. The default value is 256.
+ - Valid when I(state=present), to create a FlashCopy mapping.
+ type: str
+ force:
+ description:
+ - Brings the target volume online. This parameter is required if the FlashCopy mapping is in the stopped state.
+ - Valid when I(state=absent), to delete a FlashCopy mapping.
+ type: bool
+ old_name:
+ description:
+ - Specifies the old name of the FlashCopy mapping.
+ - Applies when I(state=present), to rename the existing FlashCopy mapping.
+ type: str
+ validate_certs:
+ description:
+ - Validates the SSL certificate.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create FlashCopy mapping for snapshot
+ ibm.storage_virtualize.ibm_svc_manage_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ name: snapshot-name
+ copytype: snapshot
+ source: source-volume-name
+ target: target-volume-name
+ mdiskgrp: Pool0
+ consistgrp: consistencygroup-name
+ copyrate: 50
+ grainsize: 64
+- name: Create FlashCopy mapping for clone
+ ibm.storage_virtualize.ibm_svc_manage_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ name: clone-name
+ copytype: clone
+ source: source-volume-name
+ target: target-volume-name
+ mdiskgrp: Pool0
+ consistgrp: consistencygroup-name
+ copyrate: 50
+ grainsize: 64
+- name: Create FlashCopy mapping for backup
+ ibm.storage_virtualize.ibm_svc_manage_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ name: backup-name
+ copytype: backup
+ source: source-volume-name
+ target: target-volume-name
+ mdiskgrp: Pool0
+ copyrate: 50
+ grainsize: 64
+- name: Delete FlashCopy mapping for snapshot
+ ibm.storage_virtualize.ibm_svc_manage_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: snapshot-name
+ state: absent
+ force: true
+- name: Delete FlashCopy mapping for clone
+ ibm.storage_virtualize.ibm_svc_manage_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: clone-name
+ state: absent
+ force: true
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+import time
+
+
+class IBMSVCFlashcopy(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ copytype=dict(type='str', required=False, choices=['snapshot', 'clone', 'backup']),
+ source=dict(type='str', required=False),
+ target=dict(type='str', required=False),
+ mdiskgrp=dict(type='str', required=False),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ consistgrp=dict(type='str', required=False),
+ noconsistgrp=dict(type='bool', required=False),
+ copyrate=dict(type='str', required=False),
+ grainsize=dict(type='str', required=False),
+ force=dict(type='bool', required=False),
+ old_name=dict(type='str')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.copytype = self.module.params.get('copytype', False)
+ self.source = self.module.params.get('source', False)
+ self.target = self.module.params.get('target', False)
+ self.mdiskgrp = self.module.params.get('mdiskgrp', False)
+ self.consistgrp = self.module.params.get('consistgrp', False)
+ self.noconsistgrp = self.module.params.get('noconsistgrp', False)
+ self.grainsize = self.module.params.get('grainsize', False)
+ self.copyrate = self.module.params.get('copyrate', False)
+ self.force = self.module.params.get('force', False)
+ self.old_name = self.module.params.get('old_name', '')
+
+ # Handling for mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg="Missing mandatory parameter: name")
+
+ # Handling for mandatory parameter state
+ if not self.state:
+ self.module.fail_json(msg="Missing mandatory parameter: state")
+
+ self.changed = False
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def run_command(self, cmd):
+ return self.restapi.svc_obj_info(cmd=cmd[0], cmdopts=cmd[1], cmdargs=cmd[2])
+
+ def mdata_exists(self, name):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(cmd='lsfcmap', cmdopts=None, cmdargs=[name])
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def gather_data(self):
+ result = [None, None, None, []]
+ commands = [["lsfcmap", None, [self.name]]]
+ if self.state == "present" and self.source:
+ commands.append(["lsvdisk", {'bytes': True, 'filtervalue': 'name=%s' % self.source}, None])
+ if self.state == "present" and self.target:
+ commands.append(["lsvdisk", {'bytes': True, 'filtervalue': 'name=%s' % self.target}, None])
+ commands.append(["lsvdisk", {'bytes': True, 'filtervalue': 'name=%s' % self.target + "_temp_*"}, None])
+ res = list(map(self.run_command, commands))
+ if len(res) == 1:
+ result[0] = res[0]
+ elif len(res) == 2:
+ result[0] = res[0]
+ result[1] = res[1]
+ elif len(res) == 4:
+ result = res
+ return result
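+
+ # Editor's sketch (illustrative): gather_data() always returns a list of
+ # the shape [mdata, sdata, tdata, temp]. With state=present, source='src'
+ # and target='tgt' (hypothetical names), four commands run: lsfcmap for
+ # the mapping, lsvdisk filtered on 'src', on 'tgt' and on 'tgt_temp_*',
+ # filling all four slots; with only a mapping name (state=absent), just
+ # result[0] is populated.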
+
+ def target_create(self, temp_target_name, sdata):
+ cmd = 'mkvdisk'
+ cmdopts = {}
+ cmdopts['name'] = temp_target_name
+ if self.mdiskgrp:
+ cmdopts['mdiskgrp'] = self.mdiskgrp
+ else:
+ cmdopts['mdiskgrp'] = sdata['mdisk_grp_name']
+ cmdopts['size'] = sdata['capacity']
+ cmdopts['unit'] = 'b'
+ cmdopts['iogrp'] = sdata['IO_group_name']
+ if self.copytype == 'snapshot':
+ cmdopts['rsize'] = '0%'
+ cmdopts['autoexpand'] = True
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("Creating vdisk.. Command %s opts %s", cmd, cmdopts)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("Create target volume result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("Create target volume result message %s",
+ result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create target volume [%s]" % self.target)
+
+ def fcmap_create(self, temp_target_name):
+ if self.copyrate:
+ if self.copytype in ('clone', 'backup'):
+ if int(self.copyrate) not in range(1, 151):
+ self.module.fail_json(msg="Copyrate for clone and backup must be in range 1-150")
+ if self.copytype == 'snapshot':
+ if int(self.copyrate) not in range(0, 151):
+ self.module.fail_json(msg="Copyrate for snapshot must be in range 0-150")
+ else:
+ if self.copytype in ('clone', 'backup'):
+ self.copyrate = 50
+ elif self.copytype == 'snapshot':
+ self.copyrate = 0
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mkfcmap'
+ cmdopts = {}
+ cmdopts['name'] = self.name
+ cmdopts['source'] = self.source
+ cmdopts['target'] = temp_target_name
+ cmdopts['copyrate'] = self.copyrate
+ if self.grainsize:
+ cmdopts['grainsize'] = self.grainsize
+ if self.consistgrp:
+ cmdopts['consistgrp'] = self.consistgrp
+ if self.copytype == 'clone':
+ cmdopts['autodelete'] = True
+ if self.copytype == 'backup':
+ cmdopts['incremental'] = True
+ self.log("Creating fc mapping.. Command %s opts %s",
+ cmd, cmdopts)
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("Create flash copy mapping relationship result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("Create flash copy mapping relationship result "
+ "message %s", result['message'])
+ else:
+ self.module.fail_json(msg="Failed to create FlashCopy mapping "
+ "relationship [%s]" % self.name)
+
+ def fcmap_delete(self):
+ self.log("Deleting flash copy mapping relationship'%s'", self.name)
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmfcmap'
+ cmdopts = {}
+ if self.force:
+ cmdopts['force'] = self.force
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ def rename_temp_to_target(self, temp_name):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chvdisk'
+ cmdopts = {}
+ cmdopts['name'] = self.target
+ self.log("Rename %s to %s", cmd, cmdopts)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[temp_name])
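+
+ # Editor's note: apply() first creates the target as a temporary volume
+ # named '<target>_temp_<epoch>' (for example the hypothetical
+ # 'tgt_temp_1700000000.0'), creates the fcmap against that name, and only
+ # then renames it to the final target name with the chvdisk call above,
+ # so a half-created mapping never leaves a volume under the final name.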
+
+ def fcmap_probe(self, data):
+ props = {}
+ props_not_supported = []
+ if self.source:
+ if data["source_vdisk_name"] != self.source:
+ props_not_supported.append("source")
+ if self.target:
+ if data["target_vdisk_name"] != self.target:
+ props_not_supported.append("target")
+ if self.copytype:
+ if (self.copytype == "snapshot" and data['autodelete'] == "on") or (self.copytype == "clone" and data["autodelete"] != "on"):
+ props_not_supported.append("copytype")
+ if self.grainsize:
+ if data['grain_size'] != self.grainsize:
+ props_not_supported.append("grainsize")
+ if props_not_supported:
+ self.module.fail_json(msg="Update not supported for parameter: " + ", ".join(props_not_supported))
+ self.log("Probe which properties need to be updated...")
+ if data['group_name'] and self.noconsistgrp:
+ props['consistgrp'] = 0
+ if not self.noconsistgrp:
+ if self.consistgrp:
+ if self.consistgrp != data['group_name']:
+ props['consistgrp'] = self.consistgrp
+ if self.copyrate:
+ if self.copyrate != data['copy_rate']:
+ props['copyrate'] = self.copyrate
+ return props
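+
+ # Editor's sketch (illustrative, data values hypothetical): given lsfcmap
+ # output {'source_vdisk_name': 'src', 'target_vdisk_name': 'tgt',
+ # 'autodelete': 'off', 'grain_size': '256', 'group_name': 'cg0',
+ # 'copy_rate': '50'}, probing with consistgrp='cg1' and copyrate='100'
+ # returns {'consistgrp': 'cg1', 'copyrate': '100'}; a changed source,
+ # target, copytype or grainsize instead fails the module, since those
+ # properties cannot be modified with chfcmap.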
+
+ def fcmap_update(self, modify):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if modify:
+ self.log("updating fcmap with properties %s", modify)
+ cmd = 'chfcmap'
+ cmdopts = {}
+ for prop in modify:
+ cmdopts[prop] = modify[prop]
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # for validating parameter while renaming a volume
+ def parameter_handling_while_renaming(self):
+ parameters = {
+ "copytype": self.copytype,
+ "copyrate": self.copyrate,
+ "source": self.source,
+ "target": self.target,
+ "grainsize": self.grainsize,
+ "mdiskgrp": self.mdiskgrp,
+ "consistgrp": self.consistgrp
+ }
+ parameters_exists = [parameter for parameter, value in parameters.items() if value]
+ if parameters_exists:
+ self.module.fail_json(msg="Parameters {0} not supported while renaming a mapping.".format(parameters_exists))
+
+ def flashcopy_rename(self):
+ msg = None
+ self.parameter_handling_while_renaming()
+ mdata = self.mdata_exists(self.name)
+ old_mdata = self.mdata_exists(self.old_name)
+ if not old_mdata and not mdata:
+ self.module.fail_json(msg="mapping [{0}] does not exists.".format(self.old_name))
+ elif old_mdata and mdata:
+ self.module.fail_json(msg="mapping with name [{0}] already exists.".format(self.name))
+ elif not old_mdata and mdata:
+ msg = "mdisk [{0}] already renamed.".format(self.name)
+ elif old_mdata and not mdata:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chfcmap', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "mapping [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def apply(self):
+ msg = None
+ modify = []
+
+ if self.state == 'present' and self.old_name:
+ msg = self.flashcopy_rename()
+ self.module.exit_json(msg=msg, changed=self.changed)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ mdata, sdata, tdata, temp = self.gather_data()
+ if mdata:
+ if self.state == "present":
+ modify = self.fcmap_probe(mdata)
+ if modify:
+ self.changed = True
+ else:
+ msg = "mapping [%s] already exists" % self.name
+ elif self.state == "absent":
+ self.changed = True
+ else:
+ if self.state == "present":
+ if self.source is None:
+ self.module.fail_json(msg="Required while creating FlashCopy mapping: 'source'")
+ if not sdata:
+ self.module.fail_json(msg="The source volume [%s] doesn't exist." % self.source)
+ if tdata:
+ if sdata[0]["capacity"] == tdata[0]["capacity"]:
+ if self.copytype == 'clone':
+ msg = "target [%s] already exists." % self.target
+ elif self.copytype == 'snapshot':
+ msg = "target [%s] already exists, fcmap would not be created." % self.target
+ elif sdata[0]["capacity"] != tdata[0]["capacity"]:
+ self.module.fail_json(msg="source and target must be of same size")
+ if sdata and not tdata:
+ self.changed = True
+ elif self.state == "absent":
+ msg = "mapping [%s] does not exist" % self.name
+
+ if self.changed:
+ if self.state == "present" and not modify:
+ if None in [self.source, self.target, self.copytype]:
+ self.module.fail_json(msg="Required while creating FlashCopy mapping: 'source', 'target' and 'copytype'")
+ temp_target = "%s_temp_%s" % (self.target, time.time())
+ if len(temp) == 0:
+ self.target_create(temp_target, sdata[0])
+ self.fcmap_create(temp_target)
+ self.rename_temp_to_target(temp_target)
+ msg = "mapping [%s] has been created" % self.name
+ elif len(temp) == 1:
+ self.fcmap_create(temp[0]["name"])
+ self.rename_temp_to_target(temp[0]["name"])
+ msg = "mapping [%s] has been created" % self.name
+ elif len(temp) > 1:
+ self.module.fail_json(msg="Multiple %s_temp_* volumes exists" % self.target)
+ elif self.state == "present" and modify:
+ self.fcmap_update(modify)
+ msg = "mapping [%s] has been modified" % self.name
+ elif self.state == "absent":
+ self.fcmap_delete()
+ msg = "mapping [%s] has been deleted" % self.name
+
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "mapping [%s] does not exist." % self.name
+ else:
+ msg = "mapping [%s] already exists." % self.name
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCFlashcopy()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ip.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ip.py
new file mode 100644
index 000000000..2da167760
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ip.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_ip
+short_description: This module manages IP provisioning on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkip' and 'rmip' commands.
+ - This module can run on all IBM Storage Virtualize systems running 8.4.2.0 or later.
+version_added: "1.8.0"
+options:
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) an IP address.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ node:
+ description:
+ - Specifies the name of the node.
+ type: str
+ required: true
+ port:
+ description:
+ - Specifies the port, in the range 1 - 16, to which the IP address is assigned.
+ type: int
+ required: true
+ portset:
+ description:
+ - Specifies the name of the portset object.
+ type: str
+ ip_address:
+ description:
+ - Specifies a valid IPv4/IPv6 address.
+ type: str
+ required: true
+ subnet_prefix:
+ description:
+ - Specifies the subnet mask prefix.
+ - Applies when I(state=present).
+ type: int
+ gateway:
+ description:
+ - Specifies the gateway address.
+ - Applies when I(state=present).
+ type: str
+ vlan:
+ description:
+ - Specifies a VLAN ID in the range 1 - 4096.
+ - Applies when I(state=present).
+ type: int
+ shareip:
+ description:
+ - Specifies whether the IP address is shared between multiple portsets.
+ - Applies when I(state=present).
+ type: bool
+ validate_certs:
+ description:
+ - Validates the SSL certificate.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create IP provisioning
+ ibm.storage_virtualize.ibm_svc_manage_ip:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ node: node1
+ port: 1
+ portset: portset0
+ ip_address: x.x.x.x
+ subnet_prefix: 20
+ gateway: x.x.x.x
+ vlan: 1
+ shareip: true
+ state: present
+- name: Remove IP provisioning
+ ibm.storage_virtualize.ibm_svc_manage_ip:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ node: node1
+ port: 1
+ portset: portset0
+ ip_address: x.x.x.x
+ state: absent
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCIp(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ node=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ port=dict(type='int', required=True),
+ portset=dict(type='str'),
+ ip_address=dict(type='str', required=True),
+ subnet_prefix=dict(type='int'),
+ gateway=dict(type='str'),
+ vlan=dict(type='int'),
+ shareip=dict(type='bool')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.node = self.module.params['node']
+ self.state = self.module.params['state']
+ self.port = self.module.params['port']
+ self.ip_address = self.module.params.get('ip_address', False)
+
+ # Optional
+ self.portset = self.module.params.get('portset', False)
+ self.subnet_prefix = self.module.params.get('subnet_prefix', False)
+ self.gateway = self.module.params.get('gateway', False)
+ self.vlan = self.module.params.get('vlan', False)
+ self.shareip = self.module.params.get('shareip', False)
+
+ # Initialize changed variable
+ self.changed = False
+
+ # creating an instance of IBMSVCRestApi
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.state:
+ self.module.fail_json(msg="The parameter [state] is required.")
+ if self.state == 'present':
+ required_when_present = {
+ 'node': self.node,
+ 'port': self.port,
+ 'ip_address': self.ip_address,
+ 'subnet_prefix': self.subnet_prefix
+ }
+ missing_present = [item for item, value in required_when_present.items() if not value]
+ if missing_present:
+ self.module.fail_json(msg="The parameter {0} is required when state is present.".format(missing_present))
+ if self.state == 'absent':
+ required_when_absent = {
+ 'node': self.node,
+ 'port': self.port,
+ 'ip_address': self.ip_address
+ }
+ not_required_when_absent = {
+ 'subnet_prefix': self.subnet_prefix,
+ 'gateway': self.gateway,
+ 'vlan': self.vlan,
+ 'shareip': self.shareip
+ }
+ missing_absent = [item for item, value in required_when_absent.items() if not value]
+ if missing_absent:
+ self.module.fail_json(msg="The parameter {0} is required when state is absent.".format(missing_absent))
+ not_applicable_absent = [item for item, value in not_required_when_absent.items() if value]
+ if not_applicable_absent:
+ self.module.fail_json(msg="The parameter {0} are not applicable when state is absent.".format(not_applicable_absent))
+
+ def get_ip_info(self):
+ all_data = self.restapi.svc_obj_info(cmd='lsip', cmdopts=None, cmdargs=None)
+ if self.portset:
+ data = list(
+ filter(
+ lambda item: item['node_name'] == self.node and
+ item['port_id'] == str(self.port) and
+ item['portset_name'] == self.portset and
+ item['IP_address'] == self.ip_address, all_data
+ )
+ )
+ else:
+ data = list(
+ filter(
+ lambda item: item['node_name'] == self.node and
+ item['port_id'] == str(self.port) and
+ item['IP_address'] == self.ip_address, all_data
+ )
+ )
+ if len(data) > 1:
+ self.module.fail_json(msg="Module could not find the exact IP with [node, port, ip_address]. Please also use [portset].")
+ self.log('GET: IP data: %s', data)
+ return data
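+
+ # Editor's sketch (illustrative, data values hypothetical): an lsip row
+ # looks like {'id': '0', 'node_name': 'node1', 'port_id': '1',
+ # 'portset_name': 'portset0', 'IP_address': '192.0.2.10'}. The filters
+ # above match on node, port and IP address (plus portset when supplied);
+ # if two portsets share the same node/port/IP, the lookup returns two
+ # rows and the module asks the user to disambiguate with [portset].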
+
+ def create_ip(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'mkip'
+ command_options = {
+ 'node': self.node,
+ 'port': self.port,
+ 'ip': self.ip_address,
+ 'prefix': self.subnet_prefix
+ }
+ if self.portset:
+ command_options['portset'] = self.portset
+ if self.gateway:
+ command_options['gw'] = self.gateway
+ if self.vlan:
+ command_options['vlan'] = self.vlan
+ if self.shareip:
+ command_options['shareip'] = self.shareip
+ result = self.restapi.svc_run_command(command, command_options, cmdargs=None)
+ self.log("create IP result %s", result)
+ if 'message' in result:
+ self.changed = True
+ self.log("create IP result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create IP [%s]" % self.ip_address)
+
+ def remove_ip(self, ip_address_id):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'rmip'
+ command_options = None
+ cmdargs = [ip_address_id]
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.changed = True
+ self.log("removed IP '%s'", self.ip_address)
+
+ def apply(self):
+ msg = None
+ self.basic_checks()
+ if self.state == 'present':
+ self.create_ip()
+ msg = "IP address {0} has been created.".format(self.ip_address)
+ elif self.state == 'absent':
+ ip_data = self.get_ip_info()
+ if ip_data:
+ self.remove_ip(ip_data[0]['id'])
+ msg = "IP address {0} has been removed.".format(self.ip_address)
+ else:
+ msg = "IP address {0} does not exist.".format(self.ip_address)
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCIp()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_migration.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_migration.py
new file mode 100644
index 000000000..86bab427f
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_migration.py
@@ -0,0 +1,779 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Rohit kumar <rohit.kumar6@ibm.com>
+# Shilpi Jain <shilpi.jain1@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_migration
+short_description: This module manages volume migration between clusters on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage the migration commands.
+version_added: "1.6.0"
+options:
+ type_of_migration:
+ description:
+ - Specifies the type of migration, whether it is a migration across pools or across clusters.
+ choices: [across_pools, across_clusters]
+ default: across_clusters
+ type: str
+ version_added: '1.11.0'
+ new_pool:
+ description:
+ - Specifies the pool on which the volume has to be migrated.
+ - Valid only when I(type_of_migration=across_pools).
+ type: str
+ version_added: '1.11.0'
+ source_volume:
+ description:
+ - Specifies the name of the existing source volume to be used in migration.
+ - Required when I(state=initiate) or I(state=cleanup) or I(type_of_migration=across_pools).
+ type: str
+ target_volume:
+ description:
+ - Specifies the name of the volume to be created on the target system.
+ - Required when I(state=initiate).
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ remote_cluster:
+ description:
+ - Specifies the name of the remote cluster.
+ - Required when I(state=initiate).
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user on the local system.
+ type: str
+ remote_username:
+ description:
+ - REST API username for the partner Storage Virtualize system.
+ - The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user on the partner system.
+ - Valid when I(state=initiate).
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user on the local system.
+ type: str
+ remote_password:
+ description:
+ - REST API password for the partner Storage Virtualize system.
+ - The parameters I(remote_username) and I(remote_password) are required if not using I(remote_token) to authenticate a user on the partner system.
+ - Valid when I(state=initiate).
+ type: str
+ relationship_name:
+ description:
+ - Name of the migration relationship. Required when I(state=initiate) or I(state=switch).
+ type: str
+ state:
+ description:
+ - Specifies the different states of the migration process when I(type_of_migration=across_clusters).
+ - C(initiate), creates a volume on the remote cluster; optionally used to replicate hosts, and to create and start a migration relationship.
+ - C(switch), switches the migration relationship direction allowing write access on the target volume.
+ - C(cleanup), deletes the source volume and migration relationship after a 'switch'.
+ choices: [initiate, switch, cleanup]
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ remote_token:
+ description:
+ - The authentication token to verify a user on the partner Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ - Valid when I(state=initiate).
+ type: str
+ remote_pool:
+ description:
+ - Specifies the pool in which the volume should be created on the partner Storage Virtualize system.
+ - Required when I(state=initiate).
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ remote_validate_certs:
+ description:
+ - Validates certification for partner Storage Virtualize system.
+ - Valid when I(state=initiate).
+ default: false
+ type: bool
+ replicate_hosts:
+ description:
+ - Replicates the hosts mapped to a source volume on the source system, to the target system, and maps the hosts to the target volume. The
+ user can use ibm_svc_host and ibm_svc_vol_map modules to create and map hosts to the target volume for an
+ existing migration relationship.
+ - Valid when I(state=initiate).
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Rohit Kumar(@rohitk-github)
+ - Shilpi Jain(@Shilpi-J)
+notes:
+ - This module supports C(check_mode).
+ - This module supports both volume migration across pools and volume migration across clusters.
+ - If the user does not specify type_of_migration, the module proceeds with migration across clusters by default.
+ - With I(type_of_migration=across_pools), the only parameters allowed are I(new_pool) and I(source_volume), along with the cluster credentials.
+'''
+
+EXAMPLES = '''
+- name: Create a target volume, create a relationship, replicate hosts from the
+    source volume to the target volume, and start the relationship
+ ibm.storage_virtualize.ibm_svc_manage_migration:
+ source_volume: "src_vol"
+ target_volume: "target_vol"
+ clustername: "{{ source_cluster }}"
+ remote_cluster: "{{ remote_cluster }}"
+ token: "{{ source_cluster_token }}"
+ state: initiate
+ replicate_hosts: true
+ remote_token: "{{ partner_cluster_token }}"
+ relationship_name: "migrate_vol"
+ log_path: /tmp/ansible.log
+ remote_pool: "{{ remote_pool }}"
+- name: Switch replication direction
+ ibm.storage_virtualize.ibm_svc_manage_migration:
+ relationship_name: "migrate_vol"
+ clustername: "{{ source_cluster }}"
+ token: "{{ source_cluster_token }}"
+ state: switch
+ log_path: /tmp/ansible.log
+- name: Delete source volume and migration relationship
+ ibm.storage_virtualize.ibm_svc_manage_migration:
+ clustername: "{{ source_cluster }}"
+ state: cleanup
+ source_volume: "src_vol"
+ token: "{{ source_cluster_token }}"
+ log_path : /tmp/ansible.log
+- name: Migrate an existing volume from pool0 to pool1
+ ibm.storage_virtualize.ibm_svc_manage_migration:
+ clustername: "{{ source_cluster }}"
+ token: "{{ source_cluster_token }}"
+ log_path : /tmp/ansible.log
+ type_of_migration : across_pools
+ source_volume : vol1
+ new_pool : pool1
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCMigrate(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ type_of_migration=dict(type='str', required=False, default='across_clusters',
+ choices=['across_clusters', 'across_pools']),
+ new_pool=dict(type='str', required=False),
+ source_volume=dict(type='str', required=False),
+ target_volume=dict(type='str', required=False),
+ state=dict(type='str',
+ choices=['initiate', 'switch', 'cleanup']),
+ remote_pool=dict(type='str', required=False),
+ replicate_hosts=dict(type='bool', default=False),
+ relationship_name=dict(type='str', required=False),
+ remote_cluster=dict(type='str', required=False),
+ remote_token=dict(type='str', required=False, no_log=True),
+ remote_validate_certs=dict(type='bool', default=False),
+ remote_username=dict(type='str', required=False),
+ remote_password=dict(type='str', required=False, no_log=True)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ self.existing_rel_data = ""
+ self.source_vdisk_data = ""
+ self.hosts_iscsi_flag = False
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required when migration across clusters
+ self.state = self.module.params['state']
+
+ # Required when migration across pools
+ self.new_pool = self.module.params['new_pool']
+
+ # Optional
+ self.type_of_migration = self.module.params['type_of_migration']
+ self.source_volume = self.module.params['source_volume']
+ self.remote_pool = self.module.params['remote_pool']
+ self.target_volume = self.module.params['target_volume']
+ self.relationship_name = self.module.params['relationship_name']
+ self.remote_username = self.module.params['remote_username']
+ self.replicate_hosts = self.module.params['replicate_hosts']
+ self.remote_password = self.module.params['remote_password']
+ self.remote_token = self.module.params['remote_token']
+ self.remote_cluster = self.module.params['remote_cluster']
+ self.remote_validate_certs = self.module.params['remote_validate_certs']
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_vdisk(self):
+ self.log("Entering function get_existing_vdisk")
+ cmd = 'lsvdisk'
+ cmdargs = {}
+ cmdopts = {'bytes': True}
+ cmdargs = [self.source_volume]
+ remote_vdisk_data = ""
+ existing_vdisk_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+ if self.target_volume:
+ cmdargs = [self.target_volume]
+ remote_restapi = self.construct_remote_rest()
+ remote_vdisk_data = remote_restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+ return existing_vdisk_data, remote_vdisk_data
+
+ def basic_checks(self):
+ self.log("Entering function basic_checks()")
+ valid_params = {}
+ valid_params['initiate'] = ['source_volume', 'remote_cluster', 'target_volume', 'replicate_hosts',
+ 'remote_username', 'remote_password', 'relationship_name',
+ 'remote_token', 'remote_pool', 'remote_validate_certs']
+ valid_params['switch'] = ['relationship_name']
+ valid_params['cleanup'] = ['source_volume']
+ param_list = set(valid_params['initiate'] + valid_params['switch'] + valid_params['cleanup'])
+
+ # Check for missing mandatory parameter
+ for param in valid_params[self.state]:
+ param_value = getattr(self, param)
+ if not param_value:
+ if self.state == "initiate":
+ if param == 'remote_validate_certs' or param == 'replicate_hosts':
+ continue
+ if (param == 'remote_username' or param == 'remote_password'):
+ if not self.remote_username or not self.remote_password:
+ if self.remote_token:
+ continue
+ else:
+ self.module.fail_json(msg="You must pass in either pre-acquired remote_token or "
+ "remote_username/remote_password to generate new token.")
+ elif param == 'remote_token':
+ if (self.remote_username and self.remote_password):
+ if not self.remote_token:
+ continue
+ self.module.fail_json(msg="Missing mandatory parameter [%s]." % param)
+
+ # Check for invalid parameters
+ for param in param_list:
+ if self.state == 'initiate':
+ if getattr(self, param):
+ if param not in valid_params['initiate']:
+ self.module.fail_json(msg="Invalid parameter [%s] for state 'initiate'" % param)
+ if self.state == 'switch':
+ if getattr(self, param):
+ if param not in valid_params['switch']:
+ self.module.fail_json(msg="Invalid parameter [%s] for state 'switch'" % param)
+ elif self.state == 'cleanup':
+ if getattr(self, param):
+ if param not in valid_params['cleanup']:
+ self.module.fail_json(msg="Invalid parameter [%s] for state 'cleanup'" % param)
+
+ def get_source_hosts(self):
+ self.log("Entering function get_source_hosts")
+ cmd = 'lsvdiskhostmap'
+ cmdargs = {}
+ cmdopts = {}
+ cmdargs = [self.source_volume]
+ sourcevolume_hosts = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+ return sourcevolume_hosts
+
+ def replicate_source_hosts(self, hosts_data):
+ self.log("Entering function replicate_source_hosts()")
+ merged_result = []
+ hosts_wwpn = {}
+ hosts_iscsi = {}
+ host_list = []
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("creating vdiskhostmaps on target system")
+
+ if isinstance(hosts_data, list):
+ for d in hosts_data:
+ merged_result.append(d)
+ elif hosts_data:
+ merged_result = [hosts_data]
+
+ for host in merged_result:
+ host_list.append(host['host_name'])
+
+ for host in host_list:
+ host_wwpn_list = []
+ host_iscsi_list = []
+ self.log("for host %s", host)
+ data = self.restapi.svc_obj_info(cmd='lshost', cmdopts=None, cmdargs=[host])
+ nodes_data = data['nodes']
+ for node in nodes_data:
+ if 'WWPN' in node.keys():
+ host_wwpn_list.append(node['WWPN'])
+ hosts_wwpn[host] = host_wwpn_list
+ elif 'iscsi_name' in node.keys():
+ host_iscsi_list.append(node['iscsi_name'])
+ hosts_iscsi[host] = host_iscsi_list
+ if hosts_wwpn or hosts_iscsi:
+ self.create_remote_hosts(hosts_wwpn, hosts_iscsi)
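+
+ # Editor's sketch (illustrative, values hypothetical): after scanning the
+ # lshost output, the two dicts map host names to initiator lists, e.g.
+ # hosts_wwpn = {'fc_host1': ['500507680100A001', '500507680100A002']}
+ # hosts_iscsi = {'iscsi_host1': ['iqn.1994-05.com.redhat:example']}
+ # create_remote_hosts() then recreates any host missing on the target
+ # system (mkhost with fcwwpn/iscsiname) and maps every source host to the
+ # target volume via map_host_vol_remote().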
+
+ def create_remote_hosts(self, hosts_wwpn, hosts_iscsi):
+ self.log("Entering function create_remote_hosts()")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+ # Make command
+        source_host_list = []
+        remote_hosts_list = self.return_remote_hosts()
+ if hosts_iscsi:
+ for host, iscsi_vals in hosts_iscsi.items():
+ source_host_list.append(host)
+ if hosts_wwpn:
+ for host, wwpn_vals in hosts_wwpn.items():
+ source_host_list.append(host)
+
+ cmd = 'mkhost'
+ for host, wwpn in hosts_wwpn.items():
+ if host not in remote_hosts_list:
+ cmdopts = {'name': host, 'force': True}
+ wwpn = ':'.join([str(elem) for elem in wwpn])
+ cmdopts['fcwwpn'] = wwpn
+ remote_restapi = self.construct_remote_rest()
+ remote_restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ for host, iscsi in hosts_iscsi.items():
+ if host not in remote_hosts_list:
+ cmdopts = {'name': host, 'force': True}
+ iscsi = ','.join([str(elem) for elem in iscsi])
+ cmdopts['iscsiname'] = iscsi
+ remote_restapi = self.construct_remote_rest()
+ remote_restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ if source_host_list:
+ self.map_host_vol_remote(source_host_list)
+
+ def map_host_vol_remote(self, host_list):
+ remote_restapi = self.construct_remote_rest()
+ if self.module.check_mode:
+ self.changed = True
+ return
+ for host in host_list:
+ # Run command
+ cmd = 'mkvdiskhostmap'
+ cmdopts = {'force': True}
+ cmdopts['host'] = host
+
+ cmdargs = [self.target_volume]
+ result = remote_restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log("create vdiskhostmap result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create vdiskhostmap result message %s", result['message'])
+ else:
+ self.module.fail_json(msg="Failed to create vdiskhostmap.")
+
+ def vdisk_create(self, data):
+ if not self.remote_pool:
+ self.module.fail_json(msg="You must pass in "
+ "remote_pool to the module.")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("creating vdisk '%s'", self.source_volume)
+ size = int(data[0]['capacity'])
+ # Make command
+ cmd = 'mkvolume'
+ cmdopts = {}
+ if self.remote_pool:
+ cmdopts['pool'] = self.remote_pool
+ cmdopts['name'] = self.target_volume
+ cmdopts['size'] = size
+ cmdopts['unit'] = "b"
+ self.log("creating vdisk command %s opts %s", cmd, cmdopts)
+ # Run command
+ remote_restapi = self.construct_remote_rest()
+ result = remote_restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create vdisk result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create vdisk result message %s", result['message'])
+ else:
+ self.module.fail_json(msg="Failed to create volume [%s]" % self.source_volume)
+
+ def verify_remote_volume_mapping(self):
+ self.log("Entering function verify_remote_volume_mapping")
+ cmd = 'lsvdiskhostmap'
+ cmdargs = {}
+ cmdopts = {}
+ cmdargs = [self.target_volume]
+ remote_hostmap_data = ""
+ remote_restapi = self.construct_remote_rest()
+ remote_hostmap_data = remote_restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+ if remote_hostmap_data:
+ self.module.fail_json(msg="The target volume has hostmappings, Migration relationship cannot be created.")
+
+ def return_remote_hosts(self):
+ self.log("Entering function return_remote_hosts")
+ cmd = 'lshost'
+ remote_hosts = []
+ cmdopts = {}
+ cmdargs = None
+ remote_hosts_data = []
+ remote_restapi = self.construct_remote_rest()
+ remote_hosts_data = remote_restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+        if not remote_hosts_data:
+            return remote_hosts
+        self.log("Number of remote hosts: %s", len(remote_hosts_data))
+ for host in remote_hosts_data:
+ remote_hosts.append(host['name'])
+ return remote_hosts
+
+ def verify_target(self):
+ self.log("Entering function verify_target()")
+ source_data, target_data = self.get_existing_vdisk()
+ if source_data:
+ if source_data[0]['RC_name']:
+ self.module.fail_json(msg="Source Volume [%s] is already in a relationship." % self.source_volume)
+ if target_data:
+ if target_data[0]['RC_name']:
+ self.module.fail_json(msg="Target Volume [%s] is already in a relationship." % self.target_volume)
+ if target_data[0]['mdisk_grp_name'] != self.remote_pool:
+ self.module.fail_json(msg="Target Volume [%s] exists on a different pool." % self.target_volume)
+ if not source_data:
+ self.module.fail_json(msg="Source Volume [%s] does not exist." % self.source_volume)
+ elif source_data and target_data:
+ source_size = int(source_data[0]['capacity'])
+ remote_size = int(target_data[0]['capacity'])
+ if source_size != remote_size:
+ self.module.fail_json(msg="Remote Volume size is different than that of source volume.")
+ else:
+ self.log("Target volume already exists, verifying volume mappings now..")
+ self.verify_remote_volume_mapping()
+ elif source_data and not target_data:
+ self.vdisk_create(source_data)
+ self.log("Target volume successfully created")
+ self.changed = True
+
+ def discover_partner_system(self):
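+        # Look up the partnership with the remote cluster and return its console IP,
+        # which is later used to open a REST connection to the partner system.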
+ cmd = 'lspartnership'
+ cmdopts = {}
+ cmdargs = [self.remote_cluster]
+ partnership_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+ if partnership_data:
+ system_location = partnership_data['location']
+ if system_location == 'local':
+ self.module.fail_json(msg="The relationship could not be created as migration relationships are only allowed to be created to a remote system.")
+ self.partnership_exists = True
+ remote_socket = partnership_data['console_IP']
+ return remote_socket.split(':')[0]
+ else:
+ msg = "The partnership with remote cluster [%s] does not exist." % self.remote_cluster
+ self.module.fail_json(msg=msg)
+
+ def construct_remote_rest(self):
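+        # Build a REST API client for the partner system discovered via lspartnership,
+        # authenticated with either remote credentials or a pre-acquired token.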
+ remote_ip = self.discover_partner_system()
+ self.remote_restapi = IBMSVCRestApi(
+ module=self.module,
+ domain='',
+ clustername=remote_ip,
+ username=self.module.params['remote_username'],
+ password=self.module.params['remote_password'],
+ validate_certs=self.module.params['remote_validate_certs'],
+ log_path=self.module.params['log_path'],
+ token=self.module.params['remote_token']
+ )
+ return self.remote_restapi
+
+ def create_relationship(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("Creating remote copy '%s'", self.relationship_name)
+
+ # Make command
+ cmd = 'mkrcrelationship'
+ cmdopts = {}
+ if self.remote_cluster:
+ cmdopts['cluster'] = self.remote_cluster
+ if self.source_volume:
+ cmdopts['master'] = self.source_volume
+ cmdopts['aux'] = self.target_volume
+ cmdopts['name'] = self.relationship_name
+ cmdopts['migration'] = True
+
+ # Run command
+ self.log("Command %s opts %s", cmd, cmdopts)
+ if not self.existing_rel_data:
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create remote copy result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("Succeeded to create remote copy result message %s", result['message'])
+ else:
+ msg = "Failed to create migration relationship [%s]" % self.relationship_name
+ self.module.fail_json(msg=msg)
+
+ def source_vol_relationship(self, volume):
+ """
+        Check if the source volume is associated with any migration relationship.
+ Returns:
+ None if no matching instances
+ """
+
+ source_vdisk_data, target_vdisk_data = self.get_existing_vdisk()
+ if not source_vdisk_data:
+ msg = "Source volume [%s] does not exist" % self.source_volume
+ self.module.exit_json(msg=msg)
+ self.log('Trying to get the remote copy relationship')
+ relationship_name = source_vdisk_data[0]['RC_name']
+ if not relationship_name:
+ self.module.fail_json(msg="Volume [%s] cannot be deleted. No Migration relationship is configured with the volume." % self.source_volume)
+ existing_rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
+ if existing_rel_data['copy_type'] != 'migration':
+ self.module.fail_json(msg="Volume [%s] cannot be deleted. No Migration relationship is configured with the volume." % self.source_volume)
+
+ def existing_rc(self):
+ """
+ Find the relationships such as Metro Mirror, Global Mirror relationships visible to the system.
+
+ Returns:
+ None if no matching instances or a list including all the matching
+ instances
+ """
+ self.log('Trying to get the remote copy relationship %s', self.relationship_name)
+ self.existing_rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[self.relationship_name])
+ return self.existing_rel_data
+
+ def verify_existing_rel(self, rel_data):
+ if self.existing_rel_data:
+ master_volume, aux_volume = rel_data['master_vdisk_name'], rel_data['aux_vdisk_name']
+ primary, remotecluster, rel_type = rel_data['primary'], rel_data['aux_cluster_name'], rel_data['copy_type']
+ if rel_type != 'migration':
+ self.module.fail_json(msg="Remote Copy relationship [%s] already exists and is not a migration relationship" % self.relationship_name)
+ if self.source_volume != master_volume:
+ self.module.fail_json(msg="Migration relationship [%s] already exists with a different source volume" % self.relationship_name)
+ if self.target_volume != aux_volume:
+ self.module.fail_json(msg="Migration relationship [%s] already exists with a different target volume" % self.relationship_name)
+ if primary != 'master':
+ self.module.fail_json(msg="Migration relationship [%s] replication direction is incorrect" % self.relationship_name)
+ if remotecluster != self.remote_cluster:
+ self.module.fail_json(msg="Migration relationship [%s] is configured with a different partner system" % self.relationship_name)
+
+ def start_relationship(self):
+ """Start the migration relationship copy process."""
+ cmdopts = {}
+ if self.module.check_mode:
+ self.changed = True
+ return
+ result = self.restapi.svc_run_command(cmd='startrcrelationship', cmdopts=cmdopts, cmdargs=[self.relationship_name])
+
+ if result == '':
+ self.changed = True
+ self.log("succeeded to start the remote copy %s", self.relationship_name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("start the rcrelationship %s with result message %s", self.relationship_name, result['message'])
+ else:
+ msg = "Failed to start the rcrelationship [%s]" % self.relationship_name
+ self.module.fail_json(msg=msg)
+
+ def switch(self):
+ """Switch the replication direction."""
+ cmdopts = {}
+ cmdopts['primary'] = 'aux'
+ if self.existing_rel_data:
+ rel_type = self.existing_rel_data['copy_type']
+ if rel_type != 'migration':
+ self.module.fail_json(msg="Remote Copy relationship [%s] is not a migration relationship." % self.relationship_name)
+ if self.module.check_mode:
+ self.changed = True
+ return
+ result = self.restapi.svc_run_command(cmd='switchrcrelationship', cmdopts=cmdopts, cmdargs=[self.relationship_name])
+ self.log("switch the rcrelationship %s with result %s", self.relationship_name, result)
+ if result == '':
+ self.changed = True
+ self.log("succeeded to switch the remote copy %s", self.relationship_name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("switch the rcrelationship %s with result message %s", self.relationship_name, result['message'])
+ else:
+ msg = "Failed to switch the rcrelationship [%s]" % self.relationship_name
+ self.module.fail_json(msg=msg)
+
+    def delete(self):
+        """Use the rmvolume command to delete the source volume and the existing migration relationship."""
+        if self.module.check_mode:
+            self.changed = True
+            return
+        cmd = 'rmvolume'
+        cmdopts = {}
+        cmdopts['removehostmappings'] = True
+        cmdargs = [self.source_volume]
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+ # Command does not output anything when successful.
+        if result == '':
+            self.changed = True
+            self.log("succeeded to delete the source volume %s and associated host mappings and migration relationship", self.source_volume)
+ elif 'message' in result:
+ self.changed = True
+ self.log("delete the source volume %s with result message %s",
+ self.source_volume, result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to delete the volume [%s]" % self.source_volume)
+
+ def basic_checks_migrate_vdisk(self):
+ self.log("Entering function basic_checks_migrate_vdisk()")
+ invalid_params = {}
+
+ # Check for missing parameters
+ missing = [item[0] for item in [('new_pool', self.new_pool), ('source_volume', self.source_volume)] if not item[1]]
+ if missing:
+ self.module.fail_json(
+                msg='Missing mandatory parameter(s): [{0}] for migration across pools'.format(', '.join(missing))
+ )
+
+ invalid_params['across_pools'] = ['state', 'relationship_name', 'remote_cluster', 'remote_username',
+ 'remote_password', 'remote_token', 'remote_pool', 'remote_validate_certs',
+ 'replicate_hosts']
+ param_list = set(invalid_params['across_pools'])
+
+        # Check for invalid parameters
+        for param in param_list:
+            if self.type_of_migration == 'across_pools' and getattr(self, param):
+                self.module.fail_json(msg="Invalid parameter [%s] for volume migration 'across_pools'" % param)
+
+ def migrate_pools(self):
+ self.basic_checks_migrate_vdisk()
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ source_data, target_data = self.get_existing_vdisk()
+ if not source_data:
+ msg = "Source volume [%s] does not exist" % self.source_volume
+ self.module.fail_json(msg=msg)
+ elif source_data[0]['mdisk_grp_name'] != self.new_pool:
+ cmd = 'migratevdisk'
+ cmdopts = {}
+ cmdopts['mdiskgrp'] = self.new_pool
+ cmdopts['vdisk'] = self.source_volume
+ self.log("Command %s opts %s", cmd, cmdopts)
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+
+ if result == '':
+ self.changed = True
+ else:
+ self.module.fail_json(msg="Failed to migrate volume in different pool.")
+ else:
+ msg = "No modifications done. New pool [%s] is same" % self.new_pool
+ self.module.exit_json(msg=msg, changed=False)
+
+ def apply(self):
+ changed = False
+ msg = None
+ if self.type_of_migration == 'across_pools':
+ self.migrate_pools()
+ msg = "Source Volume migrated successfully to new pool [%s]." % self.new_pool
+ changed = True
+ else:
+ self.basic_checks()
+ if self.state == 'initiate' or self.state == 'switch':
+ existing_rc_data = self.existing_rc()
+ if not existing_rc_data:
+ if self.state == 'initiate':
+ self.verify_target()
+ self.create_relationship()
+ if self.replicate_hosts:
+ hosts_data = self.get_source_hosts()
+ self.replicate_source_hosts(hosts_data)
+ self.start_relationship()
+ changed = True
+ msg = "Migration Relationship [%s] has been started." % self.relationship_name
+ elif self.state == 'switch':
+ msg = "Relationship [%s] does not exist." % self.relationship_name
+ changed = False
+ self.module.fail_json(msg=msg)
+ elif self.state == 'initiate':
+ self.verify_existing_rel(existing_rc_data)
+ self.start_relationship()
+ msg = "Migration Relationship [%s] has been started." % self.relationship_name
+ changed = True
+ elif self.state == 'switch':
+ self.switch()
+ msg = "Migration Relationship [%s] successfully switched." % self.relationship_name
+ changed = True
+ elif self.state == 'cleanup':
+ self.source_vol_relationship(self.source_volume)
+ self.delete()
+ msg = "Source Volume [%s] deleted successfully." % self.source_volume
+ changed = True
+ if self.module.check_mode:
+ msg = "skipping changes due to check mode."
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCMigrate()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_mirrored_volume.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_mirrored_volume.py
new file mode 100644
index 000000000..f0ba90abf
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_mirrored_volume.py
@@ -0,0 +1,757 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_mirrored_volume
+short_description: This module manages mirrored volumes on IBM Storage Virtualize
+ family systems
+description:
+ - Ansible interface to manage 'mkvolume', 'addvolumecopy', 'rmvolumecopy', and 'rmvolume' volume commands.
+version_added: "1.4.0"
+options:
+ name:
+ description:
+ - Specifies the name to assign to the new volume.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) a mirrored volume.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ poolA:
+ description:
+      - Specifies the name of the first storage pool to be used when creating a mirrored volume.
+ type: str
+ poolB:
+ description:
+      - Specifies the name of the second storage pool to be used when creating a mirrored volume.
+ type: str
+ type:
+ description:
+ - Specifies the desired volume type.
+ - When the type is C(local hyperswap), a HyperSwap volume gets created.
+      - When the type is C(standard) and values for I(poolA) and I(poolB) arguments are also specified,
+        a "standard mirror" volume gets created.
+      - If a "standard" mirrored volume exists and either I(poolA) or I(poolB)
+        is specified, the mirrored volume gets converted to a standard volume.
+ choices: [ local hyperswap, standard ]
+ type: str
+ thin:
+ description:
+ - Specifies if the volume to be created is thin-provisioned.
+ type: bool
+ compressed:
+ description:
+ - Specifies if the volume to be created is compressed.
+ type: bool
+ deduplicated:
+ description:
+ - Specifies if the volume to be created is deduplicated.
+ type: bool
+ grainsize:
+ description:
+ - Specifies the grain size (in KB) to use when
+ creating the HyperSwap volume.
+ type: str
+ rsize:
+ description:
+ - Specifies the rsize (buffersize) in %. Defines how much physical space
+ is initially allocated to the thin-provisioned or compressed volume.
+ type: str
+ size:
+ description:
+      - Specifies the size of the mirrored volume in MB. This can also be used
+ to resize a mirrored volume. When resizing, only mandatory parameters can
+ be passed.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Rohit Kumar(@rohitk-github)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a HyperSwap volume
+ ibm.storage_virtualize.ibm_svc_manage_mirrored_volume:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ type: "local hyperswap"
+ name: "vol1"
+ state: present
+ poolA: "pool1"
+ poolB: "pool2"
+ size: "1024"
+- name: Create a thin-provisioned HyperSwap volume
+ ibm.storage_virtualize.ibm_svc_manage_mirrored_volume:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ type: "local hyperswap"
+ name: "vol2"
+ state: present
+ poolA: "pool1"
+ poolB: "pool2"
+ size: "1024"
+ thin: true
+- name: Delete a mirrored volume
+ ibm.storage_virtualize.ibm_svc_manage_mirrored_volume:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: "vol2"
+ state: absent
+- name: Create a standard mirror volume
+ block:
+ - name: Create Volume
+ ibm.storage_virtualize.ibm_svc_manage_mirrored_volume:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: "vol4"
+ state: present
+ type: "standard"
+ poolA: "pool1"
+ poolB: "pool3"
+- name: Resize an existing mirrored volume
+ block:
+ - name: Resize an existing mirrored volume
+ ibm.storage_virtualize.ibm_svc_manage_mirrored_volume:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: "vol1"
+ state: present
+ size: "{{new_size}}"
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCvolume(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ poolA=dict(type='str', required=False),
+ poolB=dict(type='str', required=False),
+ size=dict(type='str', required=False),
+ thin=dict(type='bool', required=False),
+ type=dict(type='str', required=False, choices=['local hyperswap', 'standard']),
+ grainsize=dict(type='str', required=False),
+ rsize=dict(type='str', required=False),
+ compressed=dict(type='bool', required=False),
+ deduplicated=dict(type='bool', required=False)
+
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ self.vdisk_type = ""
+ self.discovered_poolA = ""
+ self.discovered_poolB = ""
+ self.discovered_standard_vol_pool = ""
+ self.poolA_data = ""
+ self.poolB_data = ""
+ self.isdrp = False
+ self.expand_flag = False
+ self.shrink_flag = False
+
+ # logging setup
+ log_path = self.module.params.get('log_path')
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params.get('name')
+ self.state = self.module.params.get('state')
+
+ if not self.name:
+ self.module.fail_json(msg="Missing mandatory parameter: name")
+ if not self.state:
+ self.module.fail_json(msg="Missing mandatory parameter: state")
+
+ # Optional
+ self.poolA = self.module.params.get('poolA')
+ self.poolB = self.module.params.get('poolB')
+ self.size = self.module.params.get('size')
+ self.type = self.module.params.get('type')
+ self.compressed = self.module.params.get('compressed')
+ self.thin = self.module.params.get('thin')
+ self.deduplicated = self.module.params.get('deduplicated')
+ self.rsize = self.module.params.get('rsize')
+ self.grainsize = self.module.params.get('grainsize')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params.get('clustername'),
+ domain=self.module.params.get('domain'),
+ username=self.module.params.get('username'),
+ password=self.module.params.get('password'),
+ validate_certs=self.module.params.get('validate_certs'),
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_vdisk(self):
+ self.log("Entering function get_existing_vdisk")
+ cmd = 'lsvdisk'
+ cmdargs = {}
+ cmdopts = {'bytes': True}
+ cmdargs = [self.name]
+ existing_vdisk_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs)
+ return existing_vdisk_data
+
+ def basic_checks(self, data):
+ self.log("Entering function basic_checks")
+ if self.poolA:
+ self.poolA_data = self.restapi.svc_obj_info(cmd='lsmdiskgrp', cmdopts=None, cmdargs=[self.poolA])
+ if not self.poolA_data:
+ self.module.fail_json(msg="PoolA does not exist")
+ if self.poolB:
+ self.poolB_data = self.restapi.svc_obj_info(cmd='lsmdiskgrp', cmdopts=None, cmdargs=[self.poolB])
+ if not self.poolB_data:
+ self.module.fail_json(msg="PoolB does not exist")
+ if self.state == "present" and not self.type and not self.size:
+ self.module.fail_json(msg="missing required argument: type")
+ if self.poolA and self.poolB:
+ if self.poolA == self.poolB:
+ self.module.fail_json(msg="poolA and poolB cannot be same")
+ siteA, siteB = self.discover_site_from_pools()
+ if siteA != siteB and self.type == "standard":
+ self.module.fail_json(msg="To create Standard Mirrored volume, provide pools belonging to same site.")
+ if not self.poolA and not self.poolB and self.state == "present" and not self.size:
+ self.module.fail_json(msg="Both poolA and poolB cannot be empty")
+ if self.type == "local hyperswap" and self.state != 'absent':
+ if not self.poolA or not self.poolB:
+ self.module.fail_json(msg="Both poolA and poolB need to be passed when type is 'local hyperswap'")
+
+ def discover_vdisk_type(self, data):
+        # Discover the vdisk type. This function is called if the volume already exists.
+ self.log("Entering function discover_vdisk_type")
+ is_std_mirrored_vol = False
+ is_hs_vol = False
+ if data[0]['type'] == "many":
+ is_std_mirrored_vol = True
+ self.discovered_poolA = data[1]['mdisk_grp_name']
+ self.discovered_poolB = data[2]['mdisk_grp_name']
+ self.log("The discovered standard mirrored volume \"%s\" belongs to \
+pools \"%s\" and \"%s\"", self.name, self.discovered_poolA, self.discovered_poolB)
+
+ relationship_name = data[0]['RC_name']
+ if relationship_name:
+ rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
+ if rel_data['copy_type'] == "activeactive":
+ is_hs_vol = True
+ if is_hs_vol:
+ master_vdisk = rel_data['master_vdisk_name']
+ aux_vdisk = rel_data['aux_vdisk_name']
+ master_vdisk_data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts=None, cmdargs=[master_vdisk])
+ aux_vdisk_data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts=None, cmdargs=[aux_vdisk])
+ if is_std_mirrored_vol:
+ self.discovered_poolA = master_vdisk_data[1]['mdisk_grp_name']
+ self.discovered_poolB = aux_vdisk_data[1]['mdisk_grp_name']
+ self.log("The discovered mixed volume \"%s\" belongs to pools \"%s\" and \"%s\"", self.name, self.discovered_poolA, self.discovered_poolB)
+ else:
+ self.discovered_poolA = master_vdisk_data[0]['mdisk_grp_name']
+ self.discovered_poolB = aux_vdisk_data[0]['mdisk_grp_name']
+ self.log("The discovered HyperSwap volume \"%s\" belongs to pools\
+ \"%s\" and \"%s\"", self.name, self.discovered_poolA, self.discovered_poolB)
+
+ if is_std_mirrored_vol and is_hs_vol:
+ self.module.fail_json(msg="Unsupported Configuration: Both HyperSwap and Standard Mirror \
+are configured on this volume")
+ elif is_hs_vol:
+ vdisk_type = "local hyperswap"
+ elif is_std_mirrored_vol:
+ vdisk_type = "standard mirror"
+ if not is_std_mirrored_vol and not is_hs_vol:
+ mdisk_grp_name = data[0]['mdisk_grp_name']
+ self.discovered_standard_vol_pool = mdisk_grp_name
+ vdisk_type = "standard"
+ self.log("The standard volume %s belongs to pool \"%s\"", self.name, self.discovered_standard_vol_pool)
+ return vdisk_type
+
+ def discover_site_from_pools(self):
+ self.log("Entering function discover_site_from_pools")
+ poolA_site = self.poolA_data['site_name']
+ poolB_site = self.poolB_data['site_name']
+ return poolA_site, poolB_site
+
+ def vdisk_probe(self, data):
+ self.log("Entering function vdisk_probe")
+ props = []
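+        # 'props' accumulates the pending update actions (resizevolume, addvolumecopy,
+        # addvdiskcopy, rmvolumecopy) that vdisk_update() applies afterwards.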
+ resizevolume_flag = False
+ if self.type == "local hyperswap" and self.vdisk_type == "standard mirror":
+ self.module.fail_json(msg="You cannot \
+update the topolgy from standard mirror to HyperSwap")
+ if (self.vdisk_type == "local hyperswap" or self.vdisk_type == "standard mirror") and self.size:
+ size_in_bytes = int(self.size) * 1024 * 1024
+ existing_size = int(data[0]['capacity'])
+ if size_in_bytes != existing_size:
+ resizevolume_flag = True
+ props += ['resizevolume']
+ if size_in_bytes > existing_size:
+ self.changebysize = size_in_bytes - existing_size
+ self.expand_flag = True
+ elif size_in_bytes < existing_size:
+ self.changebysize = existing_size - size_in_bytes
+ self.shrink_flag = True
+ if self.poolA and self.poolB:
+ if self.vdisk_type == "local hyperswap" and self.type == "standard":
+ self.module.fail_json(msg="HyperSwap Volume cannot be converted to standard mirror")
+ if self.vdisk_type == "standard mirror" or self.vdisk_type == "local hyperswap":
+ if (self.poolA == self.discovered_poolA or self.poolA == self.discovered_poolB)\
+ and (self.poolB == self.discovered_poolA or self.poolB == self.discovered_poolB) and not resizevolume_flag:
+ return props
+ elif not resizevolume_flag:
+ self.module.fail_json(msg="Pools for Standard Mirror or HyperSwap volume cannot be updated")
+ elif self.vdisk_type == "standard" and self.type == "local hyperswap":
+ # input poolA or poolB must belong to given Volume
+ if self.poolA == self.discovered_standard_vol_pool or self.poolB == self.discovered_standard_vol_pool:
+ props += ['addvolumecopy']
+ else:
+ self.module.fail_json(msg="One of the input pools must belong to the Volume")
+ elif self.vdisk_type == "standard" and self.type == "standard":
+ if self.poolA == self.discovered_standard_vol_pool or self.poolB == self.discovered_standard_vol_pool:
+ props += ['addvdiskcopy']
+ else:
+ self.module.fail_json(msg="One of the input pools must belong to the Volume")
+ elif self.vdisk_type and not self.type:
+ self.module.fail_json(msg="missing required argument: type")
+ elif not self.poolA or not self.poolB:
+ if self.vdisk_type == "standard":
+ if self.poolA == self.discovered_standard_vol_pool or self.poolB == self.discovered_standard_vol_pool:
+ self.log("Standard Volume already exists, no modifications done")
+ return props
+ if self.poolA:
+ if self.poolA == self.discovered_poolA or self.poolA == self.discovered_poolB:
+ props += ['rmvolumecopy']
+ else:
+ self.module.fail_json(msg="One of the input pools must belong to the Volume")
+ elif self.poolB:
+ if self.poolB == self.discovered_poolA or self.poolB == self.discovered_poolB:
+ props += ['rmvolumecopy']
+ else:
+ self.module.fail_json(msg="One of the input pools must belong to the Volume")
+        if (not self.poolA or not self.poolB) and not self.size:
+ if (self.system_topology == "hyperswap" and self.type == "local hyperswap"):
+ self.module.fail_json(msg="Type must be standard if either PoolA or PoolB is not specified.")
+ return props
+
+ def resizevolume(self):
+ if self.thin is not None or self.deduplicated is not None or self.rsize is not None or self.grainsize is not None \
+ or self.compressed is not None or self.poolA is not None or self.poolB is not None or self.type is not None:
+ self.module.fail_json(msg="Volume already exists, Parameter 'thin', 'deduplicated', 'rsize', 'grainsize', 'compressed' \
+'PoolA', 'PoolB' or 'type' cannot be passed while resizing the volume.")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = ""
+ cmdopts = {}
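+        # HyperSwap volumes can only grow (expandvolume); standard mirrors use
+        # expandvdisksize or shrinkvdisksize depending on the requested size.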
+ if self.vdisk_type == "local hyperswap" and self.expand_flag:
+ cmd = "expandvolume"
+ elif self.vdisk_type == "local hyperswap" and self.shrink_flag:
+ self.module.fail_json(msg="Size of a HyperSwap Volume cannot be shrinked")
+ elif self.vdisk_type == "standard mirror" and self.expand_flag:
+ cmd = "expandvdisksize"
+ elif self.vdisk_type == "standard mirror" and self.shrink_flag:
+ cmd = "shrinkvdisksize"
+ elif self.vdisk_type != "standard mirror" or self.vdisk_type != "local hyperswap":
+ self.module.fail_json(msg="The volume is not a mirror volume, Please use ibm_svc_manage_volume module for resizing standard volumes")
+ cmdopts["size"] = str(self.changebysize)
+ cmdopts["unit"] = "b"
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+ self.changed = True
+
+ def volume_create(self):
+ self.log("Entering function volume_create")
+ if not self.size:
+ self.module.fail_json(msg="You must pass in size to the module.")
+ if not self.type:
+ self.module.fail_json(msg="You must pass type to the module.")
+
+ self.log("creating Volume '%s'", self.name)
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ # Make command
+ cmd = 'mkvolume'
+ cmdopts = {}
+ if self.poolA and self.poolB:
+ cmdopts['pool'] = self.poolA + ":" + self.poolB
+ if self.size:
+ cmdopts['size'] = self.size
+ cmdopts['unit'] = "mb"
+ if self.grainsize:
+ cmdopts['grainsize'] = self.grainsize
+ if self.thin and self.rsize:
+ cmdopts['thin'] = self.thin
+ cmdopts['buffersize'] = self.rsize
+ elif self.thin:
+ cmdopts['thin'] = self.thin
+ elif self.rsize and not self.thin:
+ self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true'.")
+ if self.compressed:
+ cmdopts['compressed'] = self.compressed
+ if self.thin:
+ cmdopts['thin'] = self.thin
+ if self.deduplicated:
+ cmdopts['deduplicated'] = self.deduplicated
+ cmdopts['name'] = self.name
+ self.log("creating volume command %s opts %s", cmd, cmdopts)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create volume result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create volume result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create volume [%s]" % self.name)
+
+ def vdisk_create(self):
+ self.log("Entering function vdisk_create")
+ if not self.size:
+ self.module.fail_json(msg="You must pass in size to the module.")
+ if not self.type:
+ self.module.fail_json(msg="You must pass type to the module.")
+
+ self.log("creating Volume '%s'", self.name)
+ # Make command
+ cmd = 'mkvdisk'
+ cmdopts = {}
+ if self.poolA and self.poolB:
+ cmdopts['mdiskgrp'] = self.poolA + ":" + self.poolB
+ if self.size:
+ cmdopts['size'] = self.size
+ cmdopts['unit'] = "mb"
+ if self.compressed:
+ cmdopts['compressed'] = self.compressed
+ if self.thin and self.rsize:
+ cmdopts['rsize'] = self.rsize
+ elif self.thin:
+ cmdopts['rsize'] = "2%"
+ elif self.rsize and not self.thin:
+ self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true.'")
+ if self.grainsize:
+ cmdopts['grainsize'] = self.grainsize
+ if self.deduplicated:
+ if self.thin:
+ cmdopts['autoexpand'] = True
+ cmdopts['deduplicated'] = self.deduplicated
+ else:
+ self.module.fail_json(msg="To configure 'deduplicated', parameter 'thin' should be passed and the value should be 'true.'")
+ cmdopts['name'] = self.name
+ cmdopts['copies'] = 2
+ if self.isdrp and self.thin:
+ cmdopts['autoexpand'] = True
+ self.log("creating volume command %s opts %s", cmd, cmdopts)
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create volume result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create volume result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create Volume [%s]" % self.name)
+
+ def addvolumecopy(self):
+ self.log("Entering function addvolumecopy")
+ cmd = 'addvolumecopy'
+ cmdopts = {}
+ if self.compressed:
+ cmdopts['compressed'] = self.compressed
+ if self.grainsize:
+ cmdopts['grainsize'] = self.grainsize
+ if self.thin and self.rsize:
+ cmdopts['thin'] = self.thin
+ cmdopts['buffersize'] = self.rsize
+ elif self.thin:
+ cmdopts['thin'] = self.thin
+ elif self.rsize and not self.thin:
+ self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true'.")
+ if self.deduplicated:
+ cmdopts['deduplicated'] = self.deduplicated
+ if self.size:
+ self.module.fail_json(msg="Parameter 'size' cannot be passed while converting a standard volume to Mirror Volume")
+ if self.poolA and (self.poolB == self.discovered_standard_vol_pool and self.poolA != self.discovered_standard_vol_pool):
+ cmdopts['pool'] = self.poolA
+ elif self.poolB and (self.poolA == self.discovered_standard_vol_pool and self.poolB != self.discovered_standard_vol_pool):
+ cmdopts['pool'] = self.poolB
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ def addvdiskcopy(self):
+ self.log("Entering function addvdiskcopy")
+ cmd = 'addvdiskcopy'
+ cmdopts = {}
+ if self.size:
+ self.module.fail_json(msg="Parameter 'size' cannot be passed while converting a standard volume to Mirror Volume")
+ siteA, siteB = self.discover_site_from_pools()
+ if siteA != siteB:
+ self.module.fail_json(msg="To create Standard Mirrored volume, provide pools belonging to same site.")
+ if self.poolA and (self.poolB == self.discovered_standard_vol_pool and self.poolA != self.discovered_standard_vol_pool):
+ cmdopts['mdiskgrp'] = self.poolA
+ elif self.poolB and (self.poolA == self.discovered_standard_vol_pool and self.poolB != self.discovered_standard_vol_pool):
+ cmdopts['mdiskgrp'] = self.poolB
+ else:
+ self.module.fail_json(msg="One of the input pools must belong to the volume")
+ if self.compressed:
+ cmdopts['compressed'] = self.compressed
+ if self.grainsize:
+ cmdopts['grainsize'] = self.grainsize
+ if self.thin and self.rsize:
+ cmdopts['rsize'] = self.rsize
+ elif self.thin:
+ cmdopts['rsize'] = "2%"
+ elif self.rsize and not self.thin:
+ self.module.fail_json(msg="To configure 'rsize', parameter 'thin' should be passed and the value should be 'true'.")
+ if self.deduplicated:
+ if self.thin:
+ cmdopts['deduplicated'] = self.deduplicated
+ cmdopts['autoexpand'] = True
+ else:
+ self.module.fail_json(msg="To configure 'deduplicated', parameter 'thin' should be passed and the value should be 'true.'")
+ if self.isdrp and self.thin:
+ cmdopts['autoexpand'] = True
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ def rmvolumecopy(self):
+ self.log("Entering function rmvolumecopy")
+ cmd = 'rmvolumecopy'
+
+ if self.size or self.thin or self.deduplicated or self.rsize or self.grainsize or self.compressed:
+ self.module.fail_json(msg="Parameter 'size', 'thin', 'deduplicated', 'rsize', 'grainsize' or 'compressed' \
+cannot be passed while converting a Mirror Volume to Standard.")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmdopts = {}
+ if not self.poolA:
+ if (self.poolB != self.discovered_poolA):
+ cmdopts['pool'] = self.discovered_poolA
+ else:
+ cmdopts['pool'] = self.discovered_poolB
+ elif not self.poolB:
+ if (self.poolA != self.discovered_poolB):
+ cmdopts['pool'] = self.discovered_poolB
+ else:
+ cmdopts['pool'] = self.discovered_poolA
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ def vdisk_update(self, modify):
+ self.log("Entering function vdisk_update")
+        if 'addvdiskcopy' in modify and 'resizevolume' in modify:
+            self.module.fail_json(msg="You cannot resize the volume along with converting the volume to Standard Mirror")
+        if 'addvolumecopy' in modify and 'resizevolume' in modify:
+            self.module.fail_json(msg="You cannot resize the volume along with converting the volume to Local HyperSwap")
+        if 'rmvolumecopy' in modify and 'resizevolume' in modify:
+            self.module.fail_json(msg="You cannot resize the volume along with converting the Mirror volume to Standard")
+ if 'addvolumecopy' in modify:
+ self.addvolumecopy()
+ elif 'addvdiskcopy' in modify:
+ self.isdrpool()
+ self.addvdiskcopy()
+ elif 'rmvolumecopy' in modify:
+ self.rmvolumecopy()
+ elif 'resizevolume' in modify:
+ self.resizevolume()
+
+ def isdrpool(self):
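+        # Record whether either pool is a data reduction pool; thin copies placed
+        # in a DRP are created with autoexpand enabled.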
+ poolA_drp = self.poolA_data['data_reduction']
+ poolB_drp = self.poolB_data['data_reduction']
+ isdrpool_list = [poolA_drp, poolB_drp]
+ if "yes" in isdrpool_list:
+ self.isdrp = True
+
+ def volume_delete(self):
+ self.log("Entering function volume_delete")
+ self.log("deleting volume '%s'", self.name)
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmvolume'
+ cmdopts = None
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+ # rmvolume does not output anything when successful.
+ self.changed = True
+
+ def discover_system_topology(self):
+ self.log("Entering function discover_system_topology")
+ system_data = self.restapi.svc_obj_info(cmd='lssystem', cmdopts=None, cmdargs=None)
+ sys_topology = system_data['topology']
+ return sys_topology
+
+ def apply(self):
+ self.log("Entering function apply")
+ changed = False
+ msg = None
+ modify = []
+ vdisk_data = self.get_existing_vdisk()
+ # Perform basic checks and fail the module with appropriate error msg if requirements are not satisfied
+ self.basic_checks(vdisk_data)
+
+ # Discover System Topology
+ self.system_topology = self.discover_system_topology()
+ if self.system_topology == "standard" and self.type == "local hyperswap":
+ self.module.fail_json(msg="The system topology is Standard, HyperSwap actions are not supported.")
+
+ if vdisk_data:
+ if self.state == 'absent':
+ self.log("CHANGED: volume exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # Discover the existing vdisk type.
+ self.vdisk_type = self.discover_vdisk_type(vdisk_data)
+ # Check if there is change in configuration
+ modify = self.vdisk_probe(vdisk_data)
+ if modify:
+ changed = True
+ else:
+ if self.state == 'present':
+ if self.poolA and self.poolB:
+ self.log("CHANGED: volume does not exist, but requested state is 'present'")
+ changed = True
+ else:
+ self.module.fail_json(msg="Volume does not exist, To create a Mirrored volume (standard mirror or HyperSwap), \
+You must pass in poolA and poolB to the module.")
+
+ if changed:
+ if self.state == 'present':
+ if not vdisk_data:
+ if not self.type:
+ self.module.fail_json(msg="missing required argument: type")
+ if self.type == "standard":
+ self.isdrpool()
+ self.vdisk_create()
+ msg = "Standard Mirrored Volume %s has been created." % self.name
+ changed = True
+ elif self.type == "local hyperswap":
+ self.volume_create()
+ msg = "HyperSwap Volume %s has been created." % self.name
+ changed = True
+ else:
+ # This is where we would modify if required
+ self.vdisk_update(modify)
+ msg = "Volume [%s] has been modified." % self.name
+ changed = True
+ elif self.state == 'absent':
+ self.volume_delete()
+ msg = "Volume [%s] has been deleted." % self.name
+ changed = True
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode'
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "Volume %s did not exist." % self.name
+ else:
+ msg = self.vdisk_type + " Volume [%s] already exists, no modifications done" % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCvolume()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ownershipgroup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ownershipgroup.py
new file mode 100644
index 000000000..40147cd53
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_ownershipgroup.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sanjaikumaar <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_ownershipgroup
+short_description: This module manages ownership group on IBM Storage Virtualize family systems
+version_added: "1.7.0"
+description:
+ - Ansible interface to manage 'mkownershipgroup' and 'rmownershipgroup' commands.
+options:
+ name:
+ description:
+ - Specifies the name or label for the new ownership group object.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) an ownership group.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ keepobjects:
+ description:
+ - If specified, the objects that currently belong to the ownership group will be kept but will be moved to noownershipgroup.
+      - Applies when I(state=absent).
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create ownership group
+ ibm.storage_virtualize.ibm_svc_manage_ownershipgroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: newOwner
+ state: present
+- name: Delete ownership group
+ ibm.storage_virtualize.ibm_svc_manage_ownershipgroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: newOwner
+ state: absent
+ keepobjects: true
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi,
+ svc_argument_spec,
+ get_logger
+)
+
+
+class IBMSVCOwnershipgroup:
+
+ def __init__(self):
+ # Gathering required arguments for the module
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ keepobjects=dict(type='bool')
+ )
+ )
+
+ # Initializing ansible module
+ self.module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.keepobjects = self.module.params.get('keepobjects')
+
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ logger = get_logger(self.__class__.__name__, log_path)
+ self.log = logger.info
+ self.changed = False
+ self.msg = None
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def check_existing_owgroups(self):
+ merged_result = {}
+
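+        # lsownershipgroup may return a dict or a list of dicts; flatten it into one dict.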
+ data = self.restapi.svc_obj_info(cmd='lsownershipgroup', cmdopts=None,
+ cmdargs=[self.name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def create_ownershipgroup(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if self.keepobjects:
+ self.module.fail_json(
+                msg='Keepobjects should only be passed while deleting an ownership group'
+ )
+
+ cmd = 'mkownershipgroup'
+ cmdopts = None
+ cmdargs = ['-name', self.name]
+
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.changed = True
+ self.log('Create ownership group result: %s', result)
+
+ def delete_ownershipgroup(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmownershipgroup'
+ cmdopts = None
+ cmdargs = [self.name]
+
+ if self.keepobjects:
+ cmdargs.insert(0, '-keepobjects')
+
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.changed = True
+ self.log('Delete ownership group result: %s', result)
+
+ def apply(self):
+ if self.check_existing_owgroups():
+ if self.state == 'present':
+                self.msg = 'Ownership group (%s) already exists.' % (self.name)
+ else:
+ self.delete_ownershipgroup()
+ self.msg = 'Ownership group (%s) deleted.' % (self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Ownership group (%s) does not exist.' % (self.name)
+ else:
+ self.create_ownershipgroup()
+ self.msg = 'Ownership group (%s) created.' % \
+ (self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVCOwnershipgroup()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [{0}].'.format(to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_portset.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_portset.py
new file mode 100644
index 000000000..c22f79781
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_portset.py
@@ -0,0 +1,405 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+# Sudheesh Reddy Satti<Sudheesh.Reddy.Satti@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_portset
+short_description: This module manages portset configuration on IBM Storage Virtualize family systems
+version_added: "1.8.0"
+description:
+ - Ansible interface to manage IP and Fibre Channel (FC) portsets using 'mkportset', 'chportset', and 'rmportset' commands.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+      - Creates (C(present)) or deletes (C(absent)) a portset.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of portset.
+ type: str
+ required: true
+ porttype:
+ description:
+ - Specifies the type of port that can be mapped to the portset.
+ - Applies when I(state=present).
+      - If not specified, I(porttype=ethernet) will be used to manage an IP portset.
+ choices: [ fc, ethernet ]
+ type: str
+ version_added: '1.12.0'
+ portset_type:
+ description:
+ - Specifies the type for the portset.
+      - Applies only during creation of a portset.
+ - If not specified, I(portset_type=host) will be used.
+ choices: [ host, replication ]
+ type: str
+ ownershipgroup:
+ description:
+ - The name of the ownership group to which the portset object is being mapped.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ - Applies when I(state=present).
+ type: str
+ noownershipgroup:
+ description:
+ - Specify to remove the ownership group from portset.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+      - Applies only while updating a portset.
+ type: bool
+ old_name:
+ description:
+ - Specifies the old name of the portset while renaming.
+      - Valid when I(state=present), to rename an existing portset.
+ type: str
+ version_added: '1.12.0'
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+ - Sudheesh Reddy Satti (@sudheeshreddy)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: portset1
+ portset_type: host
+ ownershipgroup: owner1
+ state: present
+- name: Update a portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: portset1
+ noownershipgroup: true
+ state: present
+- name: Create an FC portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: fcportset1
+ porttype: fc
+ portset_type: host
+ ownershipgroup: owner1
+ state: present
+- name: Rename the portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: portset2
+ old_name: portset1
+ state: present
+- name: Delete a portset
+ ibm.storage_virtualize.ibm_svc_manage_portset:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: portset1
+ state: absent
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCPortset:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ name=dict(
+ type='str',
+ required=True,
+ ),
+ portset_type=dict(
+ type='str',
+ choices=['host', 'replication']
+ ),
+ ownershipgroup=dict(
+ type='str',
+ ),
+ noownershipgroup=dict(
+ type='bool',
+ ),
+ porttype=dict(
+ type='str',
+ choices=['fc', 'ethernet']
+ ),
+ old_name=dict(
+ type='str',
+ )
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+ # Optional parameters
+ self.portset_type = self.module.params.get('portset_type', '')
+ self.ownershipgroup = self.module.params.get('ownershipgroup', '')
+ self.noownershipgroup = self.module.params.get('noownershipgroup', '')
+ self.porttype = self.module.params.get('porttype', '')
+ self.old_name = self.module.params.get('old_name', '')
+
+ self.basic_checks()
+
+        # Variable to cache data
+ self.portset_details = None
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.ownershipgroup and self.noownershipgroup:
+ self.module.fail_json(msg='Mutually exclusive parameters: ownershipgroup, noownershipgroup')
+
+ else:
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ fields = [f for f in ['ownershipgroup', 'noownershipgroup', 'porttype', 'portset_type', 'old_name'] if getattr(self, f)]
+
+ if any(fields):
+ self.module.fail_json(msg='Parameters {0} are not supported while deleting a portset'.format(', '.join(fields)))
+
+ # for validating parameters while renaming a portset
+ def parameter_handling_while_renaming(self):
+ parameters = {
+ "ownershipgroup": self.ownershipgroup,
+ "noownershipgroup": self.noownershipgroup,
+ "porttype": self.porttype,
+ "portset_type": self.portset_type
+ }
+ parameters_exists = [parameter for parameter, value in parameters.items() if value]
+ if parameters_exists:
+ self.module.fail_json(msg="Parameters {0} not supported while renaming a portset.".format(', '.join(parameters_exists)))
+
+ def is_portset_exists(self, portset_name):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(
+ cmd='lsportset',
+ cmdopts=None,
+ cmdargs=[portset_name]
+ )
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ self.portset_details = merged_result
+
+ return merged_result
+
+ def create_portset(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
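+ # Defaults mirror the CLI options built below: portset type 'host' and
+ # port type 'ethernet' are assumed when the user supplies neither.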
+ cmd = 'mkportset'
+ cmdopts = {
+ 'name': self.name,
+ 'type': self.portset_type if self.portset_type else 'host',
+ 'porttype': self.porttype if self.porttype else 'ethernet'
+ }
+
+ if self.ownershipgroup:
+ cmdopts['ownershipgroup'] = self.ownershipgroup
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Portset (%s) created', self.name)
+ self.changed = True
+
+ def portset_probe(self):
+ updates = []
+
+ if self.portset_type and self.portset_type != self.portset_details['type']:
+ self.module.fail_json(msg="portset_type can't be updated for portset")
+ if self.porttype and self.porttype != self.portset_details['port_type']:
+ self.module.fail_json(msg="porttype can't be updated for portset")
+ if self.ownershipgroup and self.ownershipgroup != self.portset_details['owner_name']:
+ updates.append('ownershipgroup')
+ if self.noownershipgroup:
+ updates.append('noownershipgroup')
+
+ self.log("Modifications to be done: %s", updates)
+ return updates
+
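+ # Note: portset_probe() returns attribute names only; update_portset()
+ # resolves the values via getattr(self, name), so the probe entries must
+ # match this module's parameter names exactly.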
+ def update_portset(self, updates):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chportset'
+ cmdopts = dict((k, getattr(self, k)) for k in updates)
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=cmdargs)
+ self.log('Portset (%s) updated', self.name)
+ self.changed = True
+
+ def delete_portset(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmportset'
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=cmdargs)
+ self.log('Portset (%s) deleted', self.name)
+ self.changed = True
+
+ # function for renaming an existing portset with a new name
+ def portset_rename(self, portset_data):
+ msg = ''
+ self.parameter_handling_while_renaming()
+ old_portset_data = self.is_portset_exists(self.old_name)
+ if not old_portset_data and not portset_data:
+ self.module.fail_json(msg="Portset with old name {0} doesn't exist.".format(self.old_name))
+ elif old_portset_data and portset_data:
+ self.module.fail_json(msg="Portset [{0}] already exists.".format(self.name))
+ elif not old_portset_data and portset_data:
+ msg = "Portset with name [{0}] already exists.".format(self.name)
+ elif old_portset_data and not portset_data:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chportset', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "Portset [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def apply(self):
+
+ portset_data = self.is_portset_exists(self.name)
+
+ if self.state == 'present' and self.old_name:
+ self.msg = self.portset_rename(portset_data)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ if portset_data:
+ if self.state == 'present':
+ modifications = self.portset_probe()
+ if any(modifications):
+ self.update_portset(modifications)
+ self.msg = 'Portset ({0}) updated.'.format(self.name)
+ else:
+ self.msg = 'Portset ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_portset()
+ self.msg = 'Portset ({0}) deleted successfully.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Portset ({0}) does not exist. No modifications done.'.format(self.name)
+ else:
+ self.create_portset()
+ self.msg = 'Portset ({0}) created successfully.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVCPortset()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replication.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replication.py
new file mode 100644
index 000000000..23b1737e9
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replication.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
+# Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_replication
+short_description: This module manages remote copies (or rcrelationship) on
+ IBM Storage Virtualize family systems
+version_added: "1.3.0"
+
+description:
+ - Ansible interface to manage remote copy replication.
+
+options:
+ name:
+ description:
+ - Specifies the name to assign to the new remote copy relationship or to operate on the existing remote copy.
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)), removes (C(absent)) a
+ remote copy relationship.
+ choices: [absent, present]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ copytype:
+ description:
+ - Specifies the mirror type of the remote copy. 'metro' means MetroMirror,
+ 'global' means GlobalMirror, and 'GMCV' means GlobalMirror with change volume.
+ - If not specified, a MetroMirror remote copy is created when creating a remote copy (I(state=present)).
+ type: str
+ choices: [ 'metro', 'global' , 'GMCV']
+ master:
+ description:
+ - Specifies the master volume name when creating a remote copy.
+ type: str
+ aux:
+ description:
+ - Specifies the auxiliary volume name when creating a remote copy.
+ type: str
+ cyclingperiod:
+ description:
+ - Specifies the cycle period in seconds. The default cycle is of 300 seconds.
+ type: int
+ remotecluster:
+ description:
+ - Specifies the name of remote cluster when creating a remote copy.
+ type: str
+ sync:
+ description:
+ - Specifies whether to create a synchronized relationship.
+ default: false
+ type: bool
+ force:
+ description:
+ - Specifies that the relationship must be deleted even if it results in the secondary volume containing inconsistent data.
+ type: bool
+ consistgrp:
+ description:
+ - Specifies a consistency group that this relationship will join. If not specified by user, the relationship is created as a stand-alone relationship.
+ - Applies when I(state=present).
+ type: str
+ noconsistgrp:
+ description:
+ - Specifies whether to remove the specified relationship from a consistency
+ group, making the relationship a stand-alone relationship.
+ - Applies when I(state=present).
+ default: false
+ type: bool
+ validate_certs:
+ description:
+ - Validates certificates.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+notes:
+ - The parameters I(master) and I(aux) are mandatory only when a remote copy relationship does not exist.
+ - This module supports C(check_mode).
+author:
+ - rohit(@rohitk-github)
+ - Shilpi Jain (@Shilpi-Jain1)
+'''
+
+EXAMPLES = '''
+- name: Create remote copy
+ ibm.storage_virtualize.ibm_svc_manage_replication:
+ name: sample_rcopy
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: SourceVolume0
+ aux: TargetVolume0
+ copytype: global
+ sync: true
+ consistgrp: sample_rccg
+ register: result
+- name: Exclude the remote copy from consistency group
+ ibm.storage_virtualize.ibm_svc_manage_replication:
+ name: sample_rcopy2
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: present
+ noconsistgrp: true
+- name: Delete remote copy
+ ibm.storage_virtualize.ibm_svc_manage_replication:
+ name: sample_rcopy3
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: absent
+- name: Create GlobalMirror remote copy relationship with change volume
+ ibm.storage_virtualize.ibm_svc_manage_replication:
+ name: sample_rcopy4
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: present
+ remotecluster: "{{remotecluster}}"
+ master: SourceVolume1
+ aux: TargetVolume1
+ copytype: GMCV
+ sync: true
+ register: result
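+# Illustrative only: adjust the cycling period of the GMCV relationship
+# created above; the value 600 is arbitrary.
+- name: Update cycling period of a GMCV relationship
+ ibm.storage_virtualize.ibm_svc_manage_replication:
+ name: sample_rcopy4
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: present
+ copytype: GMCV
+ cyclingperiod: 600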
+'''
+
+RETURN = '''#'''
+
+
+from ansible.module_utils._text import to_native
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils.basic import AnsibleModule
+from traceback import format_exc
+
+
+class IBMSVCManageReplication(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str'),
+ state=dict(type='str',
+ required=True,
+ choices=['present', 'absent']),
+ remotecluster=dict(type='str'),
+ copytype=dict(type='str', choices=['metro', 'global', 'GMCV']),
+ master=dict(type='str'),
+ aux=dict(type='str'),
+ force=dict(type='bool', required=False),
+ consistgrp=dict(type='str'),
+ noconsistgrp=dict(type='bool', default=False),
+ sync=dict(type='bool', default=False),
+ cyclingperiod=dict(type='int')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+ self.remotecluster = self.module.params['remotecluster']
+
+ # Optional
+ self.consistgrp = self.module.params.get('consistgrp', None)
+ self.aux = self.module.params.get('aux')
+ self.master = self.module.params.get('master')
+ self.sync = self.module.params.get('sync', False)
+ self.noconsistgrp = self.module.params.get('noconsistgrp', False)
+ self.copytype = self.module.params.get('copytype', None)
+ self.force = self.module.params.get('force', False)
+ self.cyclingperiod = self.module.params.get('cyclingperiod')
+
+ # Handling missing mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def existing_vdisk(self, volname):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lsvdisk', cmdopts={'bytes': True},
+ cmdargs=[volname])
+
+ if not data:
+ self.log("source volume %s does not exist", volname)
+ return
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def cycleperiod_update(self):
+ """
+ Use the chrcrelationship command to update cycling period in remote copy
+ relationship.
+ """
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if (self.copytype == 'GMCV') and (self.cyclingperiod):
+ cmd = 'chrcrelationship'
+ cmdopts = {}
+ cmdopts['cycleperiodseconds'] = self.cyclingperiod
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ else:
+ self.log("not updating chrcrelationship with cyclingperiod %s", self.cyclingperiod)
+
+ def cyclemode_update(self):
+ """
+ Use the chrcrelationship command to update cycling mode in remote copy
+ relationship.
+ """
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chrcrelationship'
+ cmdopts = {}
+ cmdargs = [self.name]
+
+ if self.copytype == 'GMCV':
+ self.log("updating chrcrelationship with cyclingmode multi")
+ cmdopts['cyclingmode'] = 'multi'
+ else:
+ self.log("updating chrcrelationship with no cyclingmode")
+ cmdopts['cyclingmode'] = 'none'
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ def existing_rc(self):
+ """
+ Find the remote copy relationships, such as Metro Mirror and Global
+ Mirror relationships, visible to the system.
+
+ Returns:
+ None if no matching instances or a list including all the matching
+ instances
+ """
+ self.log('Trying to get the remote copy relationship %s', self.name)
+ data = self.restapi.svc_obj_info(cmd='lsrcrelationship',
+ cmdopts=None, cmdargs=[self.name])
+
+ return data
+
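+ # rcrelationship_probe() splits pending changes into two dicts: 'props'
+ # holds plain chrcrelationship options, while 'propscv' carries the
+ # cycling (change-volume) settings that are applied separately through
+ # cycleperiod_update() and cyclemode_update().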
+ def rcrelationship_probe(self, data):
+ props = {}
+ propscv = {}
+ if data['consistency_group_name'] and self.noconsistgrp:
+ props['noconsistgrp'] = self.noconsistgrp
+ if self.consistgrp is not None and self.consistgrp != data['consistency_group_name']:
+ props['consistgrp'] = self.consistgrp
+ if self.master is not None and self.master != data['master_vdisk_name']:
+ props['master'] = self.master
+ if self.aux is not None and self.aux != data['aux_vdisk_name']:
+ props['aux'] = self.aux
+ if self.copytype == 'global' and data['copy_type'] == 'metro':
+ props['global'] = True
+
+ if (self.copytype == 'metro' or self.copytype is None) and (data['copy_type'] == 'global' and data['cycling_mode'] == 'multi'):
+ self.module.fail_json(msg="Changing relationship type from GMCV to metro is not allowed")
+ elif (self.copytype == 'metro' or self.copytype is None) and data['copy_type'] == 'global':
+ props['metro'] = True
+
+ if self.copytype == 'GMCV' and data['copy_type'] == 'global' and self.consistgrp is None:
+ if data['cycling_mode'] != 'multi':
+ propscv['cyclingmode'] = 'multi'
+ if self.cyclingperiod is not None and self.cyclingperiod != int(data['cycle_period_seconds']):
+ propscv['cycleperiodseconds'] = self.cyclingperiod
+ if self.copytype == 'global' and (data['copy_type'] == 'global' and (data['master_change_vdisk_name'] or data['aux_change_vdisk_name'])):
+ propscv['cyclingmode'] = 'none'
+ if self.copytype == 'GMCV' and data['copy_type'] == 'metro':
+ self.module.fail_json(msg="Changing relationship type from metro to GMCV is not allowed")
+ if self.copytype != 'metro' and self.copytype != 'global' and self.copytype != 'GMCV' and self.copytype is not None:
+ self.module.fail_json(msg="Unsupported mirror type: %s. Only 'global', 'metro' and 'GMCV' are supported when modifying" % self.copytype)
+
+ return props, propscv
+
+ def rcrelationship_update(self, modify, modifycv):
+ """
+ Use the chrcrelationship command to modify certain attributes of an
+ existing relationship, such as adding a relationship to or removing a
+ relationship from a consistency group.
+ You can change one attribute at a time.
+ """
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ if modify:
+ self.log("updating chrcrelationship with properties %s", modify)
+ cmd = 'chrcrelationship'
+ cmdopts = {}
+ for prop in modify:
+ cmdopts[prop] = modify[prop]
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Error(if any) will be raised in svc_run_command
+ self.changed = True
+ if modifycv:
+ if 'cycleperiodseconds' in modifycv:
+ self.cycleperiod_update()
+ self.log("cyclingperiod in change volume updated")
+ if 'cyclingmode' in modifycv:
+ self.cyclemode_update()
+ self.log("cyclingmode in change volume updated")
+ # Error(if any) will be raised in svc_run_command
+ self.changed = True
+ if not modify and not modifycv:
+ self.log("There is no property need to be updated")
+ self.changed = False
+
+ def create(self):
+ """
+ Use the mkrcrelationship command to create a new Global Mirror or
+ Metro Mirror relationship, either intrasystem (both volumes in the
+ same system) or intersystem (spanning more than one system).
+
+ Returns:
+ a remote copy instance
+ """
+ if not self.name:
+ self.module.fail_json(msg="You must pass in name to the module.")
+ if not self.master:
+ self.module.fail_json(msg="You must pass in master to the module.")
+ if not self.aux:
+ self.module.fail_json(msg="You must pass in aux to the module.")
+ if not self.remotecluster:
+ self.module.fail_json(msg="You must pass in remotecluster to the module.")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("Creating remote copy '%s'", self.name)
+
+ # Make command
+ cmd = 'mkrcrelationship'
+ cmdopts = {}
+ if self.remotecluster:
+ cmdopts['cluster'] = self.remotecluster
+ if self.master:
+ cmdopts['master'] = self.master
+ if self.aux:
+ cmdopts['aux'] = self.aux
+ if self.name:
+ cmdopts['name'] = self.name
+
+ if self.copytype:
+ if self.copytype == 'global' or self.copytype == 'GMCV':
+ cmdopts['global'] = True
+ if self.copytype == 'GMCV':
+ cmdopts['cyclingmode'] = 'multi'
+ elif self.copytype == 'metro' or self.copytype == 'blank':
+ pass
+ else:
+ msg = "Invalid parameter specified as the Copy Type(%s) when creating Remotecopy" % self.copytype
+ self.module.fail_json(msg=msg)
+
+ if self.copytype != 'GMCV' and self.cyclingperiod is not None:
+ msg = "Provided copytype is %s. Copy Type must be GMCV when creating Remotecopy relationship with change volumes and cycling period" % self.copytype
+ self.module.fail_json(msg=msg)
+
+ if self.consistgrp:
+ cmdopts['consistgrp'] = self.consistgrp
+ if self.sync:
+ cmdopts['sync'] = self.sync
+
+ # Run command
+ self.log("Command %s opts %s", cmd, cmdopts)
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create remote copy result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ data = self.existing_rc()
+ self.log("Succeeded to create remote copy result message %s",
+ result['message'])
+ return data
+ else:
+ msg = "Failed to create remote copy [%s]" % self.name
+ self.module.fail_json(msg=msg)
+
+ def delete(self):
+ """
+ Use the rmrcrelationship command to delete an existing remote copy
+ relationship.
+ """
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmrcrelationship'
+ cmdopts = {}
+ if self.force:
+ cmdopts['force'] = self.force
+ cmdargs = [self.name]
+
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+ # Command does not output anything when successful.
+ if result == '':
+ self.changed = True
+ self.log("succeeded to delete the remote copy %s", self.name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("delete the remote copy %s with result message %s",
+ self.name, result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to delete the remote copy [%s]" % self.name)
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = {}
+ modifycv = {}
+ rcrelationship_data = self.existing_rc()
+ if rcrelationship_data:
+ if self.state == 'absent':
+ self.log(
+ "CHANGED: RemoteCopy relationship exists, requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ modify, modifycv = self.rcrelationship_probe(rcrelationship_data)
+ if modify or modifycv:
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+ self.log(
+ "CHANGED: Remotecopy relationship does not exist, but requested state is '%s'", self.state)
+
+ if changed:
+ if self.state == 'present':
+ if not rcrelationship_data:
+ self.create()
+ if self.copytype == 'GMCV' and self.consistgrp is None:
+ self.cycleperiod_update()
+ self.cyclemode_update()
+ msg = "remote copy relationship with change volume %s has been created." % self.name
+ else:
+ msg = "remote copy relationship %s has been created." % self.name
+ else:
+ self.rcrelationship_update(modify, modifycv)
+ msg = "remote copy relationship [%s] has been modified." % self.name
+ elif self.state == 'absent':
+ self.delete()
+ msg = "remote copy relationship [%s] has been deleted." % self.name
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+ else:
+ self.log("exiting with no changes")
+ if self.state in ['absent']:
+ msg = "Remotecopy relationship [%s] does not exist." % self.name
+ else:
+ msg = "No Modifications detected, Remotecopy relationship [%s] already exists." % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCManageReplication()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replicationgroup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replicationgroup.py
new file mode 100644
index 000000000..cf5c52f6c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_replicationgroup.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_replicationgroup
+short_description: This module manages remote copy consistency group on
+ IBM Storage Virtualize family systems
+version_added: "1.3.0"
+description:
+ - Ansible interface to manage 'mkrcconsistgrp', 'chrcconsistgrp', and 'rmrcconsistgrp'
+ remote copy consistency group commands.
+options:
+ name:
+ description:
+ - Specifies the name for the new consistency group.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)), or removes (C(absent))
+ a consistency group.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certificates.
+ default: false
+ type: bool
+ remotecluster:
+ description:
+ - Specifies the name of the remote system.
+ Only used while creating a consistency group.
+ type: str
+ force:
+ description:
+ - If used to delete a consistency group,
+ it specifies that you want the system to remove any
+ relationship that belongs to the consistency
+ group before the group is deleted.
+ - If used to start a consistency group,
+ it specifies that you want the system to process the
+ copy operation even if it causes a temporary loss of
+ consistency during synchronization.
+ - It is required if the consistency group is in the ConsistentStopped
+ state but is not synchronized, or is in the idling state,
+ unless consistency protection is configured.
+ type: bool
+ copytype:
+ description:
+ - Specifies the mirror type of the remote copy. 'metro' means MetroMirror, 'global' means GlobalMirror.
+ - If not specified, a MetroMirror remote copy is created when creating a remote copy (I(state=present)).
+ type: str
+ choices: [ 'metro', 'global' ]
+ cyclingmode:
+ description:
+ - Specifies the behavior of Global Mirror for the relationship.
+ - Active-active relationships and relationships with cycling modes set to Multiple must always be configured with change volumes.
+ - Applies when I(state=present) and I(copytype=global).
+ type: str
+ choices: [ 'multi', 'none' ]
+ cyclingperiod:
+ description:
+ - Specifies the cycle period in seconds.
+ type: int
+author:
+ - rohit(@rohitk-github)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Define a new rc consistency group
+ ibm.storage_virtualize.ibm_svc_manage_replicationgroup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: rccg4test
+ remotecluster: remotecluster
+ state: present
+- name: Delete rc consistency group
+ ibm.storage_virtualize.ibm_svc_manage_replicationgroup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: rccg4test
+ force: true
+ state: absent
+- name: Update rc consistency group
+ ibm.storage_virtualize.ibm_svc_manage_replicationgroup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: rccg4test
+ cyclingperiod: 60
+ state: present
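+# Illustrative only: switch an existing group from metro to global with
+# change-volume cycling; values are arbitrary.
+- name: Change copy type of rc consistency group
+ ibm.storage_virtualize.ibm_svc_manage_replicationgroup:
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: rccg4test
+ copytype: global
+ cyclingmode: multi
+ state: present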
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import \
+ IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCRCCG(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ remotecluster=dict(type='str', required=False),
+ force=dict(type='bool', required=False),
+ copytype=dict(type='str', choices=['metro', 'global']),
+ cyclingmode=dict(type='str', required=False, choices=['multi', 'none']),
+ cyclingperiod=dict(type='int', required=False)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.cluster = self.module.params.get('remotecluster', None)
+ self.force = self.module.params.get('force', False)
+ self.copytype = self.module.params.get('copytype', None)
+ self.cyclingmode = self.module.params.get('cyclingmode', None)
+ self.cyclingperiod = self.module.params.get('cyclingperiod', None)
+
+ # Handling missing mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_rccg(self):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lsrcconsistgrp', cmdopts=None,
+ cmdargs=[self.name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
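+ # rccg_probe() returns two dicts: 'props' for a copy-type change and
+ # 'propscv' for cycling settings; rccg_update() then issues a separate
+ # chrcconsistgrp call per cycling option, mirroring the one-attribute-
+ # at-a-time behaviour noted for chrcrelationship.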
+ def rccg_probe(self, data):
+ props = {}
+ propscv = {}
+ if self.copytype and self.copytype != data['copy_type']:
+ if self.copytype == 'global':
+ props['global'] = True
+ elif self.copytype == 'metro':
+ props['metro'] = True
+ else:
+ self.module.fail_json(msg="Unsupported mirror type: %s. Only 'global' and 'metro' are supported when modifying" % self.copytype)
+
+ if self.copytype == 'global' and self.cyclingperiod and self.cyclingperiod != int(data['cycle_period_seconds']):
+ propscv['cycleperiodseconds'] = self.cyclingperiod
+ if self.copytype == 'global' and self.cyclingmode and self.cyclingmode != data['cycling_mode']:
+ propscv['cyclingmode'] = self.cyclingmode
+
+ return props, propscv
+
+ def rccg_create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ rccg_data = self.get_existing_rccg()
+ if rccg_data:
+ # Defensive: probe and update instead of creating a duplicate group
+ modify, modifycv = self.rccg_probe(rccg_data)
+ self.rccg_update(modify, modifycv)
+ return
+ self.log("creating rc consistgrp '%s'", self.name)
+
+ # Make command
+ cmd = 'mkrcconsistgrp'
+ cmdopts = {'name': self.name}
+ if self.cluster:
+ cmdopts['cluster'] = self.cluster
+
+ self.log("creating rc consistgrp command '%s' opts", self.cluster)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create rc consistgrp result '%s'", result)
+ msg = "succeeded to create rc consistgrp '%s'" % self.name
+ self.log(msg)
+
+ if 'message' in result:
+ self.log("create rc consistgrp result message '%s'",
+ result['message'])
+ self.module.exit_json(msg="rc consistgrp '%s' is created" %
+ self.name, changed=True)
+
+ else:
+ self.module.fail_json(msg=result)
+
+ def rccg_update(self, modify, modifycv):
+
+ if modify:
+ self.log("updating chrcconsistgrp with properties %s", modify)
+ cmd = 'chrcconsistgrp'
+ cmdopts = {}
+ for prop in modify:
+ cmdopts[prop] = modify[prop]
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error would have been raised in svc_run_command
+ # chrcconsistgrp does not output anything when successful.
+ self.changed = True
+ if modifycv:
+ self.log("updating chrcconsistgrp with properties %s", modifycv)
+ cmd = 'chrcconsistgrp'
+ cmdargs = [self.name]
+ for prop in modifycv:
+ cmdopts = {}
+ cmdopts[prop] = modifycv[prop]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error would have been raised in svc_run_command
+ # chrcconsistgrp does not output anything when successful.
+ self.changed = True
+ if not modify and not modifycv:
+ self.log("There is no property to be updated")
+ self.changed = False
+
+ def rccg_delete(self):
+ rccg_data = self.get_existing_rccg()
+ if not rccg_data:
+ self.module.exit_json(msg="rc consistgrp '%s' did not exist" %
+ self.name, changed=False)
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting rc consistgrp '%s'", self.name)
+
+ cmd = 'rmrcconsistgrp'
+ cmdopts = {'force': True} if self.force else None
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+ # rmrcconsistgrp does not output anything when successful.
+ msg = "rc consistgrp '%s' is deleted" % self.name
+ self.log(msg)
+ self.module.exit_json(msg=msg, changed=True)
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = {}
+ rccg_data = self.get_existing_rccg()
+ if rccg_data:
+ if self.state == 'absent':
+ self.log(
+ "CHANGED: RemoteCopy group exists, requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ modify, modifycv = self.rccg_probe(rccg_data)
+ if modify or modifycv:
+ changed = True
+ else:
+ if self.state == 'present':
+ if self.copytype:
+ self.module.fail_json(msg="copytype cannot be passed while creating a consistency group")
+ changed = True
+ self.log(
+ "CHANGED: Remotecopy group does not exist, but requested state is '%s'", self.state)
+ if changed:
+ if self.state == 'present':
+ if not rccg_data:
+ self.rccg_create()
+ msg = "remote copy group %s has been created." % self.name
+ else:
+ self.rccg_update(modify, modifycv)
+ msg = "remote copy group [%s] has been modified." % self.name
+ elif self.state == 'absent':
+ self.rccg_delete()
+ msg = "remote copy group [%s] has been deleted." % self.name
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+ else:
+ self.log("exiting with no changes")
+ if self.state in ['absent']:
+ msg = "Remotecopy group [%s] does not exist." % self.name
+ else:
+ msg = "No Modifications detected, Remotecopy group [%s] already exists." % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCRCCG()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_safeguarded_policy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_safeguarded_policy.py
new file mode 100644
index 000000000..dde4d40ac
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_safeguarded_policy.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_safeguarded_policy
+short_description: This module manages safeguarded policy configuration on IBM Storage Virtualize family systems
+version_added: "1.8.0"
+description:
+ - Ansible interface to manage 'mksafeguardedpolicy' and 'rmsafeguardedpolicy' safeguarded policy commands.
+ - Safeguarded copy functionality is introduced in IBM Storage Virtualize 8.4.2.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or deletes (C(absent)) a safeguarded policy.
+ - Resume (C(resume)) or suspend (C(suspend)) the safeguarded copy functionality system wide.
+ choices: [ present, absent, suspend, resume ]
+ required: true
+ type: str
+ name:
+ description:
+ - Specifies the name of safeguarded policy.
+ - Not applicable when I(state=suspend) or I(state=resume).
+ type: str
+ backupunit:
+ description:
+ - Specifies the unit of time used by I(backupinterval).
+ - Applies when I(state=present).
+ choices: [ minute, hour, day, week, month ]
+ type: str
+ backupinterval:
+ description:
+ - Specifies the interval between backups.
+ - Applies when I(state=present).
+ type: str
+ backupstarttime:
+ description:
+ - Specifies the start time of backup in the format YYMMDDHHMM.
+ - Applies when I(state=present).
+ type: str
+ retentiondays:
+ description:
+ - Specifies the retention days for the backup.
+ - Applies when I(state=present).
+ type: str
+ validate_certs:
+ description:
+ - Validates certificates.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create safeguarded policy
+ ibm.storage_virtualize.ibm_svc_manage_safeguarded_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: sgpolicy0
+ backupunit: day
+ backupinterval: 1
+ backupstarttime: 2102281800
+ retentiondays: 15
+ state: present
+- name: Suspend safeguarded copy functionality
+ ibm.storage_virtualize.ibm_svc_manage_safeguarded_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ state: suspend
+- name: Resume safeguarded copy functionality
+ ibm.storage_virtualize.ibm_svc_manage_safeguarded_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ state: resume
+- name: Delete safeguarded policy
+ ibm.storage_virtualize.ibm_svc_manage_safeguarded_policy:
+ clustername: "{{cluster}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: sgpolicy0
+ state: absent
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCSafeguardedPolicy:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent', 'suspend', 'resume']
+ ),
+ name=dict(
+ type='str',
+ ),
+ backupunit=dict(
+ type='str',
+ choices=['minute', 'hour', 'day', 'week', 'month'],
+ ),
+ backupinterval=dict(
+ type='str',
+ ),
+ backupstarttime=dict(
+ type='str',
+ ),
+ retentiondays=dict(
+ type='str',
+ ),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+ self.backupunit = self.module.params.get('backupunit', '')
+ self.backupinterval = self.module.params.get('backupinterval', '')
+ self.backupstarttime = self.module.params.get('backupstarttime', '')
+ self.retentiondays = self.module.params.get('retentiondays', '')
+
+ self.basic_checks()
+
+ # Variable to cache data
+ self.sg_policy_details = None
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if self.state == 'present':
+ fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
+ exists = list(filter(lambda x: not getattr(self, x), fields))
+
+ if any(exists):
+ self.module.fail_json(msg="State is present but following parameters are missing: {0}".format(', '.join(exists)))
+ elif self.state == 'absent':
+ if not self.name:
+ self.module.fail_json(msg="Missing mandatory parameter: name")
+
+ fields = ['backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
+ exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
+
+ if any(exists):
+ self.module.fail_json(msg='{0} should not be passed when state=absent'.format(', '.join(exists)))
+ elif self.state in ['suspend', 'resume']:
+ fields = ['name', 'backupinterval', 'backupstarttime', 'retentiondays', 'backupunit']
+ exists = list(filter(lambda x: getattr(self, x) or getattr(self, x) == '', fields))
+
+ if any(exists):
+ self.module.fail_json(msg='{0} should not be passed when state={1}'.format(', '.join(exists), self.state))
+
+ def is_sg_exists(self):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(
+ cmd='lssafeguardedschedule',
+ cmdopts=None,
+ cmdargs=[self.name]
+ )
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ self.sg_policy_details = merged_result
+
+ return merged_result
+
+ def create_sg_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'mksafeguardedpolicy'
+ cmdopts = {
+ 'name': self.name,
+ 'backupstarttime': self.backupstarttime,
+ 'backupinterval': self.backupinterval,
+ 'backupunit': self.backupunit,
+ 'retentiondays': self.retentiondays
+ }
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log('Safeguarded policy (%s) created', self.name)
+ self.changed = True
+
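+ # sg_probe() compares each requested setting with the existing policy;
+ # lssafeguardedschedule reports backup_start_time with trailing seconds,
+ # hence the user-supplied YYMMDDHHMM value is suffixed with '00' before
+ # comparison.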
+ def sg_probe(self):
+ field_mappings = (
+ ('backupinterval', self.sg_policy_details['backup_interval']),
+ ('backupstarttime', self.sg_policy_details['backup_start_time']),
+ ('retentiondays', self.sg_policy_details['retention_days']),
+ ('backupunit', self.sg_policy_details['backup_unit'])
+ )
+ updates = []
+
+ for field, existing_value in field_mappings:
+ if field == 'backupstarttime':
+ updates.append(existing_value != '{0}00'.format(getattr(self, field)))
+ else:
+ updates.append(existing_value != getattr(self, field))
+
+ return updates
+
+ def delete_sg_policy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'rmsafeguardedpolicy'
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts=None, cmdargs=cmdargs)
+ self.log('Safeguarded policy (%s) deleted', self.name)
+ self.changed = True
+
+ def update_safeguarded_copy_functionality(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chsystem'
+ cmdopts = {'safeguardedcopysuspended': 'yes' if self.state == 'suspend' else 'no'}
+
+ self.restapi.svc_run_command(cmd, cmdopts=cmdopts, cmdargs=None)
+ self.log('Safeguarded copy functionality status changed: %s', self.state)
+ self.changed = True
+
+ def apply(self):
+ if self.state in ['resume', 'suspend']:
+ self.update_safeguarded_copy_functionality()
+ self.msg = 'Safeguarded copy functionality {0}ed'.format(self.state.rstrip('e'))
+ else:
+ if self.is_sg_exists():
+ if self.state == 'present':
+ modifications = self.sg_probe()
+ if any(modifications):
+ self.msg = 'Policy modification is not supported in Ansible. Please delete and recreate a new policy.'
+ else:
+ self.msg = 'Safeguarded policy ({0}) already exists. No modifications done.'.format(self.name)
+ else:
+ self.delete_sg_policy()
+ self.msg = 'Safeguarded policy ({0}) deleted.'.format(self.name)
+ else:
+ if self.state == 'absent':
+ self.msg = 'Safeguarded policy ({0}) does not exist. No modifications done.'.format(self.name)
+ else:
+ self.create_sg_policy()
+ self.msg = 'Safeguarded policy ({0}) created.'.format(self.name)
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(
+ changed=self.changed,
+ msg=self.msg
+ )
+
+
+def main():
+ v = IBMSVCSafeguardedPolicy()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_sra.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_sra.py
new file mode 100644
index 000000000..f2f4b7525
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_sra.py
@@ -0,0 +1,412 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_sra
+short_description: This module manages remote support assistance configuration on IBM Storage Virtualize family systems
+version_added: "1.7.0"
+description:
+ - Ansible interface to manage 'chsra' support remote assistance command.
+options:
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ state:
+ description:
+ - Enables (C(enabled)) or disables (C(disabled)) the remote support assistance.
+ choices: [ enabled, disabled ]
+ required: true
+ type: str
+ support:
+ description:
+ - Specifies the support assistance through C(remote) or C(onsite).
+ choices: [ remote, onsite ]
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the list of unique names for the support center or proxy to be defined.
+ - Required when I(support=remote), to enable remote support assistance.
+ type: list
+ elements: str
+ sra_ip:
+ description:
+ - Specifies the list of IP addresses or fully qualified domain names for the new support center or proxy server.
+ - Required when I(support=remote) and I(state=enabled), to enable remote support assistance.
+ type: list
+ elements: str
+ sra_port:
+ description:
+ - Specifies the list of port numbers for the new support center or proxy server.
+ - Required when I(support=remote) and I(state=enabled), to enable remote support assistance.
+ type: list
+ elements: str
+ validate_certs:
+ description:
+ - Validates certificates.
+ default: false
+ type: bool
+author:
+ - Sanjaikumaar M (@sanjaikumaar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Enable support remote assistance
+ ibm.storage_virtualize.ibm_svc_manage_sra:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ support: remote
+ state: enabled
+ name:
+ - proxy_1
+ - proxy_2
+ - proxy_3
+ sra_ip:
+ - '0.0.0.0'
+ - '1.1.1.1'
+ - '2.1.2.2'
+ sra_port:
+ - 8888
+ - 9999
+ - 8800
+- name: Disable support remote assistance
+ ibm.storage_virtualize.ibm_svc_manage_sra:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ support: remote
+ state: disabled
+ name:
+ - proxy_1
+ - proxy_2
+ - proxy_3
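+# Illustrative only: onsite assistance takes no proxy details.
+- name: Enable onsite support assistance
+ ibm.storage_virtualize.ibm_svc_manage_sra:
+ clustername: "{{ clustername }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ support: onsite
+ state: enabled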
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi, svc_argument_spec,
+ get_logger
+)
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCSupportRemoteAssistance:
+
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(
+ type='str',
+ required=True,
+ choices=['enabled', 'disabled']
+ ),
+ support=dict(
+ type='str',
+ required=True,
+ choices=['remote', 'onsite']
+ ),
+ name=dict(type='list', elements='str'),
+ sra_ip=dict(type='list', elements='str'),
+ sra_port=dict(type='list', elements='str')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # Required parameters
+ self.support = self.module.params['support']
+ self.state = self.module.params['state']
+
+ # Optional parameters
+ self.name = self.module.params.get('name', [])
+ self.sra_ip = self.module.params.get('sra_ip', [])
+ self.sra_port = self.module.params.get('sra_port', [])
+
+ self.basic_checks()
+
+ # Variable to store some frequently used data
+ self.sra_status_detail = None
+
+ # logging setup
+ self.log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, self.log_path)
+ self.log = log.info
+ self.changed = False
+ self.msg = ''
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=self.log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ self.filtered_params = dict(
+ filter(
+ lambda item: item[0] in ['name', 'sra_ip', 'sra_port'],
+ self.module.params.items()
+ )
+ )
+ if self.support == 'remote' and self.state == 'enabled':
+ if self.name and self.sra_ip and self.sra_port:
+ if len(self.name) == len(self.sra_ip) == len(self.sra_port):
+ if not all([all(self.name), all(self.sra_ip), all(self.sra_port)]):
+ missing_params = ', '.join([k for k, v in self.filtered_params.items() if not all(v)])
+ self.module.fail_json(
+ msg='{0} should not contain blank values'.format(missing_params)
+ )
+ else:
+ self.module.fail_json(
+ msg='The name, sra_ip and sra_port parameters must contain the same number of elements'
+ )
+ else:
+ missing_params = ', '.join([k for k, v in self.filtered_params.items() if not v])
+ self.module.fail_json(
+ msg='support is remote and state is enabled but the following parameters are missing: {0}'.format(missing_params)
+ )
+ elif self.support == 'remote' and self.state == 'disabled':
+ if self.sra_ip or self.sra_port:
+ invalid_params = ', '.join([k for k, v in self.filtered_params.items() if k in ['sra_ip', 'sra_port'] and v])
+ self.module.fail_json(
+ msg='{0} should not be passed when support=remote and state=disabled'.format(invalid_params)
+ )
+ elif self.support == 'onsite':
+ if self.name or self.sra_ip or self.sra_port:
+ invalid_params = ', '.join([k for k, v in self.filtered_params.items()])
+ self.module.fail_json(
+ msg='{0} should not be passed when support=onsite'.format(invalid_params)
+ )
+
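+ # is_sra_enabled() caches the first lssra response in sra_status_detail
+ # so subsequent status checks can reuse it without extra REST calls.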
+ def is_sra_enabled(self):
+ if self.sra_status_detail:
+ return self.sra_status_detail['status'] == 'enabled'
+
+ result = self.restapi.svc_obj_info(
+ cmd='lssra',
+ cmdopts=None,
+ cmdargs=None
+ )
+ self.sra_status_detail = result
+ return result['status'] == 'enabled'
+
+ def is_remote_support_enabled(self):
+ if self.sra_status_detail:
+ return self.sra_status_detail['remote_support_enabled'] == 'yes'
+
+ result = self.restapi.svc_obj_info(
+ cmd='lssra',
+ cmdopts=None,
+ cmdargs=None
+ )
+ return result['remote_support_enabled'] == 'yes'
+
+ def is_proxy_exist(self, obj_name):
+ obj = {}
+ result = self.restapi.svc_obj_info(
+ cmd='lssystemsupportcenter',
+ cmdopts=None,
+ cmdargs=[obj_name]
+ )
+
+ if isinstance(result, list):
+ for d in result:
+ obj.update(d)
+ else:
+ obj = result
+
+ return obj
+
+ def sra_probe(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ message = ''
+ if (self.support == 'remote' and not self.is_remote_support_enabled()) \
+ or (self.support == 'onsite' and self.is_remote_support_enabled()):
+
+ message += 'SRA configuration cannot be updated right now. '
+
+ if any(self.add_proxy_details()):
+ message += 'Proxy server details cannot be updated when SRA is enabled. '
+
+ message += 'Please disable SRA and try to update.' if message else ''
+
+ self.msg = message if message else self.msg
+
+ return self.msg
+
+ def add_proxy_details(self):
+ existed = []
+ if self.support == 'remote':
+ cmd = 'mksystemsupportcenter'
+ cmdargs = []
+
+ for nm, ip, port in zip(self.name, self.sra_ip, self.sra_port):
+ if nm != 'None' and ip != 'None' and port != 'None':
+ if not self.is_proxy_exist(nm):
+ existed.append(True)
+ if not self.is_sra_enabled():
+ cmdopts = {
+ 'name': nm,
+ 'ip': ip,
+ 'port': port,
+ 'proxy': 'yes'
+ }
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log('Proxy server(%s) details added', nm)
+ else:
+ self.log('Skipping, proxy server (%s) already exists', nm)
+ else:
+ missing_params = ', '.join([k for k, v in self.filtered_params.items() if 'None' in v])
+ self.module.fail_json(
+ msg='support is remote and state is enabled but the following parameters are missing: {0}'.format(missing_params)
+ )
+
+ return existed
+
+ def remove_proxy_details(self):
+ if self.support == 'remote':
+ cmd = 'rmsystemsupportcenter'
+ cmdopts = {}
+
+ for nm in self.name:
+ if nm and nm != 'None':
+ if self.is_proxy_exist(nm):
+ cmdargs = [nm]
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log('Proxy server(%s) details removed', nm)
+ else:
+ self.log('Proxy server(%s) does not exist', nm)
+ else:
+ self.module.fail_json(
+ msg='support is remote and state is disabled but the following parameter is blank: name'
+ )
+
+ def enable_sra(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.add_proxy_details()
+
+ cmd = 'chsra'
+ cmdopts = {}
+ cmdargs = ['-enable']
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ if self.support == 'remote':
+ cmdargs = ['-remotesupport', 'enable']
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ self.log('%s support assistance enabled', self.support.capitalize())
+
+ self.changed = True
+
+ def disable_sra(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ cmd = 'chsra'
+ cmdopts = {}
+
+ if self.support == 'remote':
+ cmdargs = ['-remotesupport', 'disable']
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ cmdargs = ['-disable']
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log('%s support assistance disabled', self.support.capitalize())
+
+ self.remove_proxy_details()
+ self.changed = True
+
+ def apply(self):
+ if self.is_sra_enabled():
+ if self.state == 'enabled':
+ if not self.sra_probe():
+ self.msg = 'Support remote assistance already enabled. '\
+ 'No modifications done.'
+ else:
+ self.disable_sra()
+ self.msg = 'Support remote assistance disabled.'
+ else:
+ if self.state == 'disabled':
+ self.msg = 'Support remote assistance is already disabled.'
+ else:
+ self.enable_sra()
+ self.msg = 'Support remote assistance({0}) enabled.'.format(
+ self.support
+ )
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(msg=self.msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCSupportRemoteAssistance()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log('Exception in apply(): \n%s', format_exc())
+ v.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_user.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_user.py
new file mode 100644
index 000000000..bb75b38b2
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_user.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_user
+short_description: This module manages users on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkuser', 'rmuser', and 'chuser' commands.
+version_added: "1.7.0"
+options:
+ name:
+ description:
+ - Specifies the unique username.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a user.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ user_password:
+ description:
+ - Specifies the password associated with the user.
+ - Applies when I(state=present).
+ type: str
+ nopassword:
+ description:
+ - Specifies that the user's password is to be deleted.
+ - Applies when I(state=present), to modify a user.
+ type: bool
+ keyfile:
+ description:
+ - Specifies the name of the file containing the Secure Shell (SSH) public key.
+ - Applies when I(state=present).
+ type: str
+ nokey:
+ description:
+ - Specifies that the user's SSH key is to be deleted.
+ - Applies when I(state=present), to modify a user.
+ type: bool
+ auth_type:
+ description:
+ - Specifies whether the user authenticates to the system using a remote authentication service or system authentication methods.
+      - The only supported value is C(usergrp).
+ - Required when I(state=present), to create a user.
+ choices: [ usergrp ]
+ type: str
+ usergroup:
+ description:
+ - Specifies the name of the user group with which the local user is to be associated.
+ - Applies when I(state=present) and I(auth_type=usergrp).
+ type: str
+ forcepasswordchange:
+ description:
+ - Specifies that the password is to be changed on next login.
+ - Applies when I(state=present), to modify a user.
+ type: bool
+ lock:
+ description:
+ - Specifies to lock the account indefinitely. The user cannot log in unless unlocked again with the parameter I(unlock).
+ - Applies when I(state=present), to modify a user.
+ - Parameters I(lock) and I(unlock) are mutually exclusive.
+ type: bool
+ unlock:
+ description:
+ - Specifies to unlock the account so it can be logged in to again.
+ - Applies when I(state=present), to modify a user.
+ - Parameters I(lock) and I(unlock) are mutually exclusive.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a user
+ ibm.storage_virtualize.ibm_svc_manage_user:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ name: user-name
+ user_password: user-password
+ auth_type: usergrp
+ usergroup: usergroup-name
+- name: Remove a user
+ ibm.storage_virtualize.ibm_svc_manage_user:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: absent
+ name: user-name
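+# An additional illustrative example (all values are placeholders): lock an
+# existing local user account with the documented lock parameter.
+- name: Lock an existing user
+  ibm.storage_virtualize.ibm_svc_manage_user:
+    clustername: "{{clustername}}"
+    domain: "{{domain}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    log_path: /tmp/playbook.debug
+    state: present
+    name: user-name
+    lock: true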
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCUser(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ auth_type=dict(type='str', required=False, choices=['usergrp']),
+ user_password=dict(type='str', required=False, no_log=True),
+ nopassword=dict(type='bool', required=False),
+ keyfile=dict(type='str', required=False, no_log=True),
+ nokey=dict(type='bool', required=False),
+ forcepasswordchange=dict(type='bool', required=False),
+ lock=dict(type='bool', required=False),
+ unlock=dict(type='bool', required=False),
+ usergroup=dict(type='str', required=False),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Required during creation of user
+ self.auth_type = self.module.params['auth_type']
+ self.usergroup = self.module.params['usergroup']
+
+ # Optional
+ self.user_password = self.module.params.get('user_password', False)
+ self.nopassword = self.module.params.get('nopassword', False)
+ self.keyfile = self.module.params.get('keyfile', False)
+ self.nokey = self.module.params.get('nokey', False)
+ self.forcepasswordchange = self.module.params.get('forcepasswordchange', False)
+ self.lock = self.module.params.get('lock', False)
+ self.unlock = self.module.params.get('unlock', False)
+
+ # creating an instance of IBMSVCRestApi
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ # perform some basic checks
+ def basic_checks(self):
+ # Handling for mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg="Missing mandatory parameter: name")
+ # Handling for mandatory parameter state
+ if not self.state:
+ self.module.fail_json(msg="Missing mandatory parameter: state")
+        # Handling mutually exclusive cases among parameters
+ if self.user_password and self.nopassword:
+ self.module.fail_json(msg="Mutually exclusive parameter: user_password, nopassword")
+ if self.lock and self.unlock:
+ self.module.fail_json(msg="Mutually exclusive parameter: lock, unlock")
+ if self.keyfile and self.nokey:
+ self.module.fail_json(msg="Mutually exclusive parameter: keyfile, nokey")
+ if self.auth_type == 'usergrp' and not self.usergroup:
+ self.module.fail_json(msg="Parameter [usergroup] is required when auth_type is usergrp")
+
+ # function to get user data
+ def get_existing_user(self):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(cmd='lsuser', cmdopts=None, cmdargs=[self.name])
+ self.log('GET: user data: %s', data)
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ # function for creating new user
+ def create_user(self):
+ # Handling unsupported parameter during user creation
+ if self.nokey or self.nopassword or self.lock or self.unlock or self.forcepasswordchange:
+ self.module.fail_json(msg="Parameters [nokey, nopassword, lock, unlock, forcepasswordchange] not applicable while creating a user")
+ # Handling for mandatory parameter role
+ if not self.auth_type:
+ self.module.fail_json(msg="Missing required parameter: auth_type")
+ if self.auth_type == 'usergrp' and not self.usergroup:
+ self.module.fail_json(msg="Missing required parameter: usergroup")
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'mkuser'
+ command_options = {
+ 'name': self.name,
+ }
+ if self.user_password:
+ command_options['password'] = self.user_password
+ if self.keyfile:
+ command_options['keyfile'] = self.keyfile
+ if self.usergroup:
+ command_options['usergrp'] = self.usergroup
+ if self.forcepasswordchange:
+ command_options['forcepasswordchange'] = self.forcepasswordchange
+
+ result = self.restapi.svc_run_command(command, command_options, cmdargs=None)
+ self.log("create user result %s", result)
+ if 'message' in result:
+ self.changed = True
+ self.log("create user result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create user [%s]" % self.name)
+
+ # function for probing an existing user
+ def probe_user(self, data):
+ properties = {}
+
+ if self.usergroup:
+ if self.usergroup != data['usergrp_name']:
+ properties['usergrp'] = self.usergroup
+ if self.user_password:
+ properties['password'] = self.user_password
+ if self.nopassword:
+ if data['password'] == 'yes':
+ properties['nopassword'] = True
+ if self.keyfile:
+ properties['keyfile'] = self.keyfile
+ if self.nokey:
+ if data['ssh_key'] == "yes":
+ properties['nokey'] = True
+ if self.lock:
+ properties['lock'] = True
+ if self.unlock:
+ properties['unlock'] = True
+ if self.forcepasswordchange:
+ properties['forcepasswordchange'] = True
+
+ return properties
+
+ # function for updating an existing user
+ def update_user(self, data):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("updating user '%s'", self.name)
+ command = 'chuser'
+ for parameter in data:
+ command_options = {
+ parameter: data[parameter]
+ }
+ self.restapi.svc_run_command(command, command_options, [self.name])
+ self.changed = True
+
+ # function for removing an existing user
+ def remove_user(self):
+ # Handling unsupported parameter during user removal
+ if self.nokey or self.nopassword or self.lock or self.unlock or self.forcepasswordchange:
+ self.module.fail_json(msg="Parameters [nokey, nopassword, lock, unlock, forcepasswordchange] not applicable while removing a user")
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("deleting user '%s'", self.name)
+ command = 'rmuser'
+ command_options = None
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.changed = True
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = {}
+ self.basic_checks()
+
+ user_data = self.get_existing_user()
+
+ if user_data:
+ if self.state == 'absent':
+ self.log("CHANGED: user exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # initiate probing of an existing user
+ modify = self.probe_user(user_data)
+ if modify:
+ self.log("CHANGED: user exists, but probe detected changes")
+ changed = True
+ else:
+ if self.state == 'present':
+ self.log("CHANGED: user does not exist, but requested state is 'present'")
+ changed = True
+ if changed:
+ if self.state == 'present':
+ if not user_data:
+ # initiate creation of new user
+ self.create_user()
+ msg = "User [%s] has been created." % self.name
+ else:
+                    # initiate update of an existing user
+ self.update_user(modify)
+ msg = "User [%s] has been modified." % self.name
+ elif self.state == 'absent':
+ # initiate deletion of an existing user
+ self.remove_user()
+ msg = "User [%s] has been removed." % self.name
+ if self.module.check_mode:
+ msg = "Skipping changes due to check mode."
+ else:
+ if self.state == 'absent':
+ msg = "User [%s] does not exist." % self.name
+ elif self.state == 'present':
+                msg = "User [%s] already exists (no modifications detected)." % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCUser()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_usergroup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_usergroup.py
new file mode 100644
index 000000000..86a3c0462
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_usergroup.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_usergroup
+short_description: This module manages user groups on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkusergrp', 'rmusergrp', and 'chusergrp' commands.
+version_added: "1.7.0"
+options:
+ name:
+ description:
+ - Specifies the name of the user group.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a user group.
+ choices: [ present, absent ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ role:
+ description:
+ - Specifies the role associated with all users that belong to this user group.
+ - Required when I(state=present).
+ choices: [ Monitor, CopyOperator, Service, FlashCopyAdmin, Administrator, SecurityAdmin, VasaProvider, RestrictedAdmin, 3SiteAdmin ]
+ type: str
+ ownershipgroup:
+ description:
+ - Specifies the name of the ownership group.
+ - Applies when I(state=present).
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ type: str
+ noownershipgroup:
+ description:
+ - Specifies that the usergroup is removed from the ownership group it belonged to.
+ - Applies when I(state=present), to modify a user group.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a user group
+ ibm.storage_virtualize.ibm_svc_manage_usergroup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: present
+ name: user-group-name
+ role: Monitor
+ ownershipgroup: ownershipgroup-name
+- name: Remove a user group
+ ibm.storage_virtualize.ibm_svc_manage_usergroup:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ state: absent
+ name: user-group-name
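+# An additional illustrative example (all values are placeholders): detach an
+# existing user group from its ownership group with the documented
+# noownershipgroup parameter.
+- name: Remove a user group from its ownership group
+  ibm.storage_virtualize.ibm_svc_manage_usergroup:
+    clustername: "{{clustername}}"
+    domain: "{{domain}}"
+    username: "{{username}}"
+    password: "{{password}}"
+    log_path: /tmp/playbook.debug
+    state: present
+    name: user-group-name
+    noownershipgroup: true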
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCUsergroup(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ role=dict(type='str', required=False, choices=[
+ 'Monitor', 'CopyOperator', 'Service', 'FlashCopyAdmin',
+ 'Administrator', 'SecurityAdmin', 'VasaProvider',
+ 'RestrictedAdmin', '3SiteAdmin'
+ ]),
+ ownershipgroup=dict(type='str', required=False),
+ noownershipgroup=dict(type='bool', required=False),
+ state=dict(type='str', required=True, choices=['present', 'absent'])
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Required during creation of user group
+ self.role = self.module.params['role']
+
+ # Optional
+ self.ownershipgroup = self.module.params.get('ownershipgroup', False)
+ self.noownershipgroup = self.module.params.get('noownershipgroup', False)
+
+ # creating an instance of IBMSVCRestApi
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ # perform some basic checks
+ def basic_checks(self):
+ # Handling for mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg="Missing mandatory parameter: name")
+ # Handling for mandatory parameter state
+ if not self.state:
+ self.module.fail_json(msg="Missing mandatory parameter: state")
+        # Handling mutually exclusive cases
+ if self.ownershipgroup and self.noownershipgroup:
+ self.module.fail_json(msg="Mutually exclusive parameter: ownershipgroup, noownershipgroup")
+        # Handling unsupported parameters while removing a usergroup
+ if self.state == 'absent' and (self.role or self.ownershipgroup or self.noownershipgroup):
+ self.module.fail_json(msg="Parameters [role, ownershipgroup, noownershipgroup] are not applicable while removing a usergroup")
+
+ # function to get user group data
+ def get_existing_usergroup(self):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(cmd='lsusergrp', cmdopts=None, cmdargs=[self.name])
+ self.log('GET: user group data: %s', data)
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ # function for creating new user group
+ def create_user_group(self):
+ # Handling unsupported parameter during usergroup creation
+ if self.noownershipgroup:
+ self.module.fail_json(msg="Parameter [noownershipgroup] is not applicable while creating a usergroup")
+ # Handling for mandatory parameter role
+ if not self.role:
+ self.module.fail_json(msg="Missing mandatory parameter: role")
+ if self.module.check_mode:
+ self.changed = True
+ return
+ command = 'mkusergrp'
+ command_options = {
+ 'name': self.name,
+ }
+ if self.role:
+ command_options['role'] = self.role
+ if self.ownershipgroup:
+ command_options['ownershipgroup'] = self.ownershipgroup
+ result = self.restapi.svc_run_command(command, command_options, cmdargs=None)
+ self.log("create user group result %s", result)
+ if 'message' in result:
+ self.changed = True
+ self.log("create user group result message %s", result['message'])
+ else:
+ self.module.fail_json(
+                msg="Failed to create user group [%s]" % self.name)
+
+ # function for probing an existing user group
+ def probe_user_group(self, data):
+ properties = {}
+ if self.role:
+ if self.role != data['role']:
+ properties['role'] = self.role
+ if self.ownershipgroup:
+ if self.ownershipgroup != data['owner_name']:
+ properties['ownershipgroup'] = self.ownershipgroup
+ if self.noownershipgroup:
+ if data['owner_name']:
+ properties['noownershipgroup'] = True
+ return properties
+
+ # function for updating an existing user group
+ def update_user_group(self, data):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("updating user group '%s'", self.name)
+ command = 'chusergrp'
+ command_options = {}
+ if 'role' in data:
+ command_options['role'] = data['role']
+ if 'ownershipgroup' in data:
+ command_options['ownershipgroup'] = data['ownershipgroup']
+ if 'noownershipgroup' in data:
+ command_options['noownershipgroup'] = True
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.changed = True
+
+ # function for removing an existing user group
+ def remove_user_group(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.log("deleting user group '%s'", self.name)
+ command = 'rmusergrp'
+ command_options = None
+ cmdargs = [self.name]
+ self.restapi.svc_run_command(command, command_options, cmdargs)
+ self.changed = True
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = {}
+ self.basic_checks()
+
+ user_group_data = self.get_existing_usergroup()
+
+ if user_group_data:
+ if self.state == 'absent':
+ self.log("CHANGED: user group exists, but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # initiate probing
+ modify = self.probe_user_group(user_group_data)
+ if modify:
+ self.log("CHANGED: user group exists, but probe detected changes")
+ changed = True
+ else:
+ if self.state == 'present':
+ self.log("CHANGED: user group does not exist, but requested state is 'present'")
+ changed = True
+ if changed:
+ if self.state == 'present':
+ if not user_group_data:
+ self.create_user_group()
+ msg = "User group [%s] has been created." % self.name
+ else:
+ self.update_user_group(modify)
+ msg = "User group [%s] has been modified." % self.name
+ elif self.state == 'absent':
+ self.remove_user_group()
+ msg = "User group [%s] has been removed." % self.name
+ if self.module.check_mode:
+ msg = "Skipping changes due to check mode."
+ else:
+ if self.state == 'absent':
+ msg = "User group [%s] does not exist." % self.name
+ elif self.state == 'present':
+                msg = "User group [%s] already exists (no modifications detected)." % self.name
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCUsergroup()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volume.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volume.py
new file mode 100644
index 000000000..0fa3f30c4
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volume.py
@@ -0,0 +1,867 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_volume
+short_description: This module manages standard volumes on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkvolume', 'rmvolume', and 'chvdisk' volume commands.
+version_added: "1.6.0"
+options:
+ name:
+ description:
+ - Specifies the name to assign to the new volume.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a volume.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ pool:
+ description:
+ - Specifies the name of the storage pool to use while creating the volume.
+ - This parameter is required when I(state=present), to create a volume.
+ type: str
+ size:
+ description:
+ - Defines the size of the volume. This parameter can also be used to resize an existing volume.
+ - Required when I(state=present), to create or modify a volume.
+ type: str
+ unit:
+ description:
+ - Specifies the data units to use with the capacity that is specified by the 'size' parameter.
+ - I(size) is required when using I(unit).
+ type: str
+ choices: [ b, kb, mb, gb, tb, pb ]
+ default: mb
+ iogrp:
+ description:
+ - Specifies the list of I/O group names. Group names in the list must be separated by using a comma.
+      - While creating a new volume, the first I/O group in the list is added as both the caching and access I/O group,
+        while the remaining I/O groups are added as access I/O groups.
+ - This parameter supports update functionality.
+ - Valid when I(state=present), to create or modify a volume.
+ type: str
+ thin:
+ description:
+ - Specifies that a thin-provisioned volume is to be created.
+ - Parameters 'thin' and 'compressed' are mutually exclusive.
+ - Valid when I(state=present), to create a thin-provisioned volume.
+ type: bool
+ type:
+ description:
+ - Specifies the type of volume to create. Volume can be thinclone or clone type.
+ - Valid when I(state=present), to create a thinclone or clone volume.
+      - Supported from Storage Virtualize family systems 8.6.2.0 or later.
+ choices: [thinclone, clone]
+ type: str
+ fromsourcevolume:
+ description:
+      - Specifies the volume name in the snapshot used to pre-populate the clone or thinclone volume.
+      - Valid when I(state=present), to create a thinclone or clone volume.
+      - Supported from Storage Virtualize family systems 8.6.2.0 or later.
+ type: str
+ compressed:
+ description:
+ - Specifies that a compressed volume is to be created.
+ - Parameters 'compressed' and 'thin' are mutually exclusive.
+ - Valid when I(state=present), to create a compressed volume.
+ type: bool
+ buffersize:
+ description:
+ - Specifies the pool capacity that the volume will reserve as a buffer for thin-provisioned and compressed volumes.
+ - Parameter 'thin' or 'compressed' must be specified to use this parameter.
+ - The default buffer size is 2%.
+ - I(thin) or I(compressed) is required when using I(buffersize).
+ - Valid when I(state=present), to create a volume.
+ type: str
+ deduplicated:
+ description:
+ - Specifies that a deduplicated volume is to be created.
+ - Required when I(state=present), to create a deduplicated volume.
+ type: bool
+ volumegroup:
+ description:
+ - Specifies the name of the volumegroup to which the volume is to be added.
+ - Parameters 'volumegroup' and 'novolumegroup' are mutually exclusive.
+ - Valid when I(state=present), to create or modify a volume.
+ type: str
+ novolumegroup:
+ description:
+ - If specified `True`, the volume is removed from its associated volumegroup.
+ - Parameters 'novolumegroup' and 'volumegroup' are mutually exclusive.
+ - Valid when I(state=present), to modify a volume.
+ type: bool
+ old_name:
+ description:
+ - Specifies the old name of the volume during renaming.
+ - Valid when I(state=present), to rename an existing volume.
+ type: str
+ version_added: '1.9.0'
+ enable_cloud_snapshot:
+ description:
+      - Specifies whether to enable or disable cloud snapshot.
+ - Valid when I(state=present), to modify an existing volume.
+ type: bool
+ version_added: '1.11.0'
+ cloud_account_name:
+ description:
+      - Specifies the name of the cloud account.
+ - Valid when I(enable_cloud_snapshot=true).
+ type: str
+ version_added: '1.11.0'
+ allow_hs:
+ description:
+ - If specified `True`, manages the hyperswap volume by ignoring the volume type validation.
+ - Valid when I(state=present), to modify an existing volume.
+ type: bool
+ default: false
+ version_added: '2.0.0'
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{domain}}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ name: "volume_name"
+ state: "present"
+ pool: "pool_name"
+ size: "1"
+ unit: "gb"
+ iogrp: "io_grp0, io_grp1"
+ volumegroup: "test_volumegroup"
+- name: Create a thin-provisioned volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ name: "volume_name"
+ state: "present"
+ pool: "pool_name"
+ size: "1"
+ unit: "gb"
+ iogrp: "io_grp0, io_grp1"
+ thin: true
+ buffersize: 10%
+- name: Create a compressed volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ name: "volume_name"
+ state: "present"
+ pool: "pool_name"
+ size: "1"
+ unit: "gb"
+ iogrp: "io_grp0, io_grp1"
+ compressed: true
+ buffersize: 10%
+- name: Creating a volume with iogrp- io_grp0
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain}}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ name: "volume_name"
+ state: "present"
+ pool: "pool_name"
+ size: "1"
+ unit: "gb"
+ iogrp: "io_grp0"
+- name: Create thinclone volume from volume vol1
+  ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "vol1_thinclone"
+ fromsourcevolume: "vol1"
+ state: "present"
+ pool: "pool0"
+- name: Create clone volume from volume vol1
+  ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "vol1_clone"
+ fromsourcevolume: "vol1"
+ state: "present"
+ pool: "pool0"
+- name: Adding a new iogrp- io_grp1
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ name: "volume_name"
+ state: "present"
+ pool: "pool_name"
+ size: "1"
+ unit: "gb"
+    iogrp: "io_grp0, io_grp1"
+- name: Rename an existing volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ old_name: "volume_name"
+ name: "new_volume_name"
+ state: "present"
+- name: Enable cloud backup in an existing volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "volume_name"
+ enable_cloud_snapshot: true
+ cloud_account_name: "aws_acc"
+ state: "present"
+- name: Delete a volume
+ ibm.storage_virtualize.ibm_svc_manage_volume:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: "{{ log_path }}"
+ name: "new_volume_name"
+ state: "absent"
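+# An additional illustrative example (all values are placeholders): detach an
+# existing volume from its volume group with the documented novolumegroup parameter.
+- name: Remove a volume from its volume group
+  ibm.storage_virtualize.ibm_svc_manage_volume:
+    clustername: "{{ clustername }}"
+    domain: "{{ domain }}"
+    username: "{{ username }}"
+    password: "{{ password }}"
+    log_path: "{{ log_path }}"
+    name: "volume_name"
+    state: "present"
+    novolumegroup: true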
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import (
+ IBMSVCRestApi,
+ svc_argument_spec,
+ get_logger,
+ strtobool
+)
+from ansible.module_utils._text import to_native
+import random
+
+
+class IBMSVCvolume(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ pool=dict(type='str', required=False),
+ size=dict(type='str', required=False),
+ unit=dict(type='str', default='mb', choices=['b', 'kb',
+ 'mb', 'gb',
+ 'tb', 'pb']),
+ buffersize=dict(type='str', required=False),
+ iogrp=dict(type='str', required=False),
+ volumegroup=dict(type='str', required=False),
+ novolumegroup=dict(type='bool', required=False),
+ thin=dict(type='bool', required=False),
+ compressed=dict(type='bool', required=False),
+ deduplicated=dict(type='bool', required=False),
+ old_name=dict(type='str', required=False),
+ enable_cloud_snapshot=dict(type='bool'),
+ cloud_account_name=dict(type='str'),
+ type=dict(type='str', required=False, choices=['clone', 'thinclone']),
+ fromsourcevolume=dict(type='str', required=False),
+ allow_hs=dict(type='bool', default=False)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required Parameters
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional Parameters
+ self.pool = self.module.params['pool']
+ self.size = self.module.params['size']
+ self.unit = self.module.params['unit']
+ self.iogrp = self.module.params['iogrp']
+ self.buffersize = self.module.params['buffersize']
+ self.volumegroup = self.module.params['volumegroup']
+ self.novolumegroup = self.module.params['novolumegroup']
+ self.thin = self.module.params['thin']
+ self.compressed = self.module.params['compressed']
+ self.deduplicated = self.module.params['deduplicated']
+ self.old_name = self.module.params['old_name']
+ self.enable_cloud_snapshot = self.module.params['enable_cloud_snapshot']
+ self.cloud_account_name = self.module.params['cloud_account_name']
+ self.allow_hs = self.module.params['allow_hs']
+ self.type = self.module.params['type']
+ self.fromsourcevolume = self.module.params['fromsourcevolume']
+
+ # internal variable
+ self.changed = False
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+    # function to parse and validate the comma-separated iogrp list against active I/O groups
+ def assemble_iogrp(self):
+ if self.iogrp:
+ temp = []
+ invalid = []
+ active_iogrp = []
+ existing_iogrp = []
+ if self.iogrp:
+ existing_iogrp = [item.strip() for item in self.iogrp.split(',') if item]
+ uni_exi_iogrp = set(existing_iogrp)
+ if len(existing_iogrp) != len(uni_exi_iogrp):
+ self.module.fail_json(msg='Duplicate iogrp detected.')
+ active_iogrp = [item['name'] for item in self.restapi.svc_obj_info('lsiogrp', None, None) if int(item['node_count']) > 0]
+ for item in existing_iogrp:
+ item = item.strip()
+ if item not in active_iogrp:
+ invalid.append(item)
+ else:
+ temp.append(item)
+ if invalid:
+ self.module.fail_json(msg='Empty or non-existing iogrp detected: %s' % invalid)
+ self.iogrp = temp
+
+ # for validating mandatory parameters of the module
+ def mandatory_parameter_validation(self):
+ missing = [item[0] for item in [('name', self.name), ('state', self.state)] if not item[1]]
+ if missing:
+ self.module.fail_json(msg='Missing mandatory parameter: [{0}]'.format(', '.join(missing)))
+ if self.volumegroup and self.novolumegroup:
+ self.module.fail_json(msg='Mutually exclusive parameters detected: [volumegroup] and [novolumegroup]')
+
+ # for validating parameter while removing an existing volume
+ def volume_deletion_parameter_validation(self):
+ invalids = ('pool', 'size', 'iogrp', 'buffersize', 'volumegroup', 'novolumegroup',
+ 'thin', 'compressed', 'deduplicated', 'old_name', 'enable_cloud_snapshot',
+ 'cloud_account_name', 'allow_hs', 'type', 'fromsourcevolume')
+
+ invalid_params = ', '.join((param for param in invalids if getattr(self, param)))
+
+ if invalid_params:
+ self.module.fail_json(
+                msg='The following parameter(s) are invalid while deleting a volume: {0}'.format(invalid_params)
+ )
+
+ # for validating parameter while creating a volume
+ def volume_creation_parameter_validation(self):
+ if self.enable_cloud_snapshot in {True, False}:
+            self.module.fail_json(msg='The following parameter is not applicable for creation: enable_cloud_snapshot')
+
+ if self.cloud_account_name:
+            self.module.fail_json(msg='The following parameter is not applicable for creation: cloud_account_name')
+
+ if self.old_name:
+ self.module.fail_json(msg='Parameter [old_name] is not supported during volume creation.')
+
+ if (self.type and not self.fromsourcevolume) or (self.fromsourcevolume and not self.type):
+            self.module.fail_json(msg='Parameters [type] and [fromsourcevolume] must be used together')
+
+ missing = []
+ if self.type and self.fromsourcevolume:
+ if not self.pool:
+ missing = ['pool']
+ if self.size:
+ self.module.fail_json(msg='Parameter [size] is invalid while creating clone or thinclone')
+ else:
+ missing = [item[0] for item in [('pool', self.pool), ('size', self.size)] if not item[1]]
+
+ if missing:
+ self.module.fail_json(msg='Missing required parameter(s) while creating: [{0}]'.format(', '.join(missing)))
+
+ # for validating parameter while renaming a volume
+ def parameter_handling_while_renaming(self):
+ if not self.old_name:
+ self.module.fail_json(msg="Parameter is required while renaming: old_name")
+ parameters = {
+ "pool": self.pool,
+ "size": self.size,
+ "iogrp": self.iogrp,
+ "buffersize": self.buffersize,
+ "volumegroup": self.volumegroup,
+ "novolumegroup": self.novolumegroup,
+ "thin": self.thin,
+ "compressed": self.compressed,
+ "deduplicated": self.deduplicated,
+ "type": self.type,
+ "fromsourcevolume": self.fromsourcevolume
+ }
+ parameters_exists = [parameter for parameter, value in parameters.items() if value]
+ if parameters_exists:
+            self.module.fail_json(msg="Parameters {0} are not supported while renaming a volume.".format(parameters_exists))
+
+ # for validating if volume type is supported or not
+ def validate_volume_type(self, data):
+ unsupported_volume = False
+
+        # The value 'many' indicates that the volume has more than one copy
+ if data[0]['type'] == "many":
+ unsupported_volume = True
+ if not unsupported_volume:
+ relationship_name = data[0]['RC_name']
+ if relationship_name:
+ rel_data = self.restapi.svc_obj_info(cmd='lsrcrelationship', cmdopts=None, cmdargs=[relationship_name])
+ if rel_data['copy_type'] == "activeactive" and not self.allow_hs:
+ unsupported_volume = True
+ if unsupported_volume:
+            self.module.fail_json(msg="The module cannot be used for managing a mirrored volume.")
+
+ # function to get existing volume data
+ def get_existing_volume(self, volume_name):
+ return self.restapi.svc_obj_info(
+ 'lsvdisk', {'bytes': True}, [volume_name]
+ )
+
+ # function to get list of associated iogrp to a volume
+ def get_existing_iogrp(self):
+ response = []
+ data = self.restapi.svc_obj_info(
+ 'lsvdiskaccess', None, [self.name]
+ )
+ if data:
+ for item in data:
+ response.append(item['IO_group_name'])
+ return response
+
+ # function to create a transient (short-lived) snapshot
+ # return value: snapshot_id
+ def create_transient_snapshot(self):
+ # Required parameters
+ snapshot_cmd = 'addsnapshot'
+ snapshot_opts = {}
+ snapshot_name = 'snapshot_' + ''.join(random.choices('0123456789', k=10))
+
+ snapshot_opts['name'] = snapshot_name
+
+ # Optional parameters
+ snapshot_opts['pool'] = self.module.params.get('pool', '')
+ snapshot_opts['volumes'] = self.module.params.get('fromsourcevolume', '')
+ snapshot_opts['retentionminutes'] = 5
+
+ addsnapshot_output = self.restapi.svc_run_command(snapshot_cmd, snapshot_opts, cmdargs=None, timeout=10)
+ snapshot_id = addsnapshot_output['id']
+
+ return snapshot_id
+
+ # function to create a new volume
+ def create_volume(self):
+ self.volume_creation_parameter_validation()
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = 'mkvolume'
+ cmdopts = {}
+ if self.pool:
+ cmdopts['pool'] = self.pool
+ if self.size:
+ cmdopts['size'] = self.size
+ if self.unit:
+ cmdopts['unit'] = self.unit
+ if self.iogrp:
+ cmdopts['iogrp'] = self.iogrp[0]
+ if self.volumegroup:
+ cmdopts['volumegroup'] = self.volumegroup
+ if self.thin:
+ cmdopts['thin'] = self.thin
+ if self.compressed:
+ cmdopts['compressed'] = self.compressed
+ if self.deduplicated:
+ cmdopts['deduplicated'] = self.deduplicated
+ if self.buffersize:
+ cmdopts['buffersize'] = self.buffersize
+ if self.name:
+ cmdopts['name'] = self.name
+ if self.type:
+ cmdopts['type'] = self.type
+ snapshot_id = self.create_transient_snapshot()
+ cmdopts['fromsnapshotid'] = snapshot_id
+ if self.fromsourcevolume:
+ cmdopts['fromsourcevolume'] = self.fromsourcevolume
+
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ if result and 'message' in result:
+ self.changed = True
+ self.log("create volume result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create volume [%s]" % self.name)
+
+ # function to remove an existing volume
+ def remove_volume(self):
+ self.volume_deletion_parameter_validation()
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command(
+ 'rmvolume', None, [self.name]
+ )
+ self.changed = True
+
+    # function that converts the size from the specified unit to bytes
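+    # e.g. size='1', unit='gb' -> 1 * (1024 ** 3) = 1073741824 bytes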
+ def convert_to_bytes(self):
+ return int(self.size) * (1024 ** (['b', 'kb', 'mb', 'gb', 'tb', 'pb'].index((self.unit).lower())))
+
+ # function to probe an existing volume
+ def probe_volume(self, data):
+ props = {}
+ # check for changes in iogrp
+ if self.iogrp:
+ input_iogrp = set(self.iogrp)
+ existing_iogrp = set(self.get_existing_iogrp())
+ if input_iogrp ^ existing_iogrp:
+ iogrp_to_add = input_iogrp - existing_iogrp
+ iogrp_to_remove = existing_iogrp - input_iogrp
+                # record both additions and removals so one does not overwrite the other
+                props['iogrp'] = {}
+                if iogrp_to_add:
+                    props['iogrp']['add'] = list(iogrp_to_add)
+                if iogrp_to_remove:
+                    props['iogrp']['remove'] = list(iogrp_to_remove)
+ # check for changes in volume size
+ if self.size:
+ input_size = self.convert_to_bytes()
+ existing_size = int(data[0]['capacity'])
+ if input_size != existing_size:
+ if input_size > existing_size:
+ props['size'] = {
+ 'expand': input_size - existing_size
+ }
+ elif existing_size > input_size:
+ props['size'] = {
+ 'shrink': existing_size - input_size
+ }
+ # check for changes in volumegroup
+ if self.volumegroup:
+ if self.volumegroup != data[0]['volume_group_name']:
+ props['volumegroup'] = {
+ 'name': self.volumegroup
+ }
+ # check for presence of novolumegroup
+ if self.novolumegroup:
+ if data[0]['volume_group_name']:
+ props['novolumegroup'] = {
+ 'status': True
+ }
+ # check for change in -thin parameter
+ if self.thin is not None:
+ if self.thin is True:
+ # a standard volume or a compressed volume
+ if (data[0]['capacity'] == data[1]['real_capacity']) or (data[1]['compressed_copy'] == 'yes'):
+ props['thin'] = {
+ 'status': True
+ }
+ else:
+ if (data[0]['capacity'] != data[1]['real_capacity']) or (data[1]['compressed_copy'] == 'no'):
+ props['thin'] = {
+ 'status': True
+ }
+ # check for change in -compressed parameter
+ if self.compressed is True:
+ # not a compressed volume
+ if data[1]['compressed_copy'] == 'no':
+ props['compressed'] = {
+ 'status': True
+ }
+ # check for change in -deduplicated parameter
+ if self.deduplicated is True:
+ # not a deduplicated volume
+ if data[1]['deduplicated_copy'] == 'no':
+ props['deduplicated'] = {
+ 'status': True
+ }
+ # check for change in pool
+ if self.pool:
+ if self.pool != data[0]['mdisk_grp_name']:
+ props['pool'] = {
+ 'status': True
+ }
+ # Check for change in cloud backup
+ if self.enable_cloud_snapshot is True:
+ if not strtobool(data[0].get('cloud_backup_enabled')):
+ props['cloud_backup'] = {'status': True}
+ elif self.enable_cloud_snapshot is False:
+ if strtobool(data[0].get('cloud_backup_enabled')):
+ props['cloud_backup'] = {'status': True}
+
+ if self.cloud_account_name:
+ if self.cloud_account_name != data[0].get('cloud_account_name'):
+ props['cloud_backup'] = {'status': True}
+
+ # Check for change in fromsourcevolume
+ if self.fromsourcevolume:
+ if self.fromsourcevolume != data[0].get('source_volume_name'):
+ props['fromsourcevolume'] = {'status': True}
+
+ # Check for change in type
+ if self.type:
+ if self.type != data[0].get('volume_type'):
+ props['type'] = {'status': True}
+
+ return props
+
+ # function to expand an existing volume size
+ def expand_volume(self, expand_size):
+ self.restapi.svc_run_command(
+ 'expandvdisksize',
+ {'size': expand_size, 'unit': 'b'},
+ [self.name]
+ )
+ self.changed = True
+
+ # function to shrink an existing volume size
+ def shrink_volume(self, shrink_size):
+ self.restapi.svc_run_command(
+ 'shrinkvdisksize',
+ {'size': shrink_size, 'unit': 'b'},
+ [self.name]
+ )
+ self.changed = True
+
+ # add iogrp
+ def add_iogrp(self, list_of_iogrp):
+ self.restapi.svc_run_command(
+ 'addvdiskaccess',
+ {'iogrp': ':'.join(list_of_iogrp)},
+ [self.name]
+ )
+ self.changed = True
+
+ # remove iogrp
+ def remove_iogrp(self, list_of_iogrp):
+ self.restapi.svc_run_command(
+ 'rmvdiskaccess',
+ {'iogrp': ':'.join(list_of_iogrp)},
+ [self.name]
+ )
+ self.changed = True
+
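+    # function for enabling or disabling cloud backup on an existing volume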
+ def update_cloud_backup(self):
+ cmdopts = {}
+
+ if self.enable_cloud_snapshot is True:
+ cmdopts['backup'] = 'cloud'
+ cmdopts['enable'] = True
+
+ if self.enable_cloud_snapshot is False:
+ cmdopts['backup'] = 'cloud'
+ cmdopts['disable'] = True
+
+ if self.cloud_account_name:
+ cmdopts['account'] = self.cloud_account_name
+
+ self.restapi.svc_run_command(
+ 'chvdisk',
+ cmdopts,
+ [self.name]
+ )
+ self.changed = True
+
+ # function to update an existing volume
+ def update_volume(self, modify):
+ # raise error for unsupported parameter
+ unsupported_parameters = ['pool', 'thin', 'compressed', 'deduplicated', 'type', 'fromsourcevolume']
+ unsupported_exists = []
+ for parameter in unsupported_parameters:
+ if parameter in modify:
+ unsupported_exists.append(parameter)
+ if unsupported_exists:
+ self.module.fail_json(msg='Update not supported for parameter: {0}'.format(unsupported_exists))
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ # updating iogrps of a volume
+ if 'iogrp' in modify:
+ if 'add' in modify['iogrp']:
+ self.add_iogrp(modify['iogrp']['add'])
+ if 'remove' in modify['iogrp']:
+ self.remove_iogrp(modify['iogrp']['remove'])
+ # updating size of a volume
+ if 'size' in modify:
+ if 'expand' in modify['size']:
+ self.expand_volume(modify['size']['expand'])
+ elif 'shrink' in modify['size']:
+ self.shrink_volume(modify['size']['shrink'])
+
+ if 'cloud_backup' in modify:
+ self.update_cloud_backup()
+
+ # updating volumegroup, novolumegroup of a volume
+ cmdopts = {}
+ if 'volumegroup' in modify:
+ cmdopts['volumegroup'] = modify['volumegroup']['name']
+ if 'novolumegroup' in modify:
+ cmdopts['novolumegroup'] = modify['novolumegroup']['status']
+ if cmdopts:
+ self.restapi.svc_run_command(
+ 'chvdisk',
+ cmdopts,
+ [self.name]
+ )
+ self.changed = True
+
+ # function for renaming an existing volume with a new name
+ def volume_rename(self, volume_data):
+ msg = None
+ self.parameter_handling_while_renaming()
+ old_volume_data = self.get_existing_volume(self.old_name)
+ if not old_volume_data and not volume_data:
+            self.module.fail_json(msg="Volume [{0}] does not exist.".format(self.old_name))
+ elif old_volume_data and volume_data:
+ self.module.fail_json(msg="Volume [{0}] already exists.".format(self.name))
+ elif not old_volume_data and volume_data:
+ msg = "Volume with name [{0}] already exists.".format(self.name)
+ elif old_volume_data and not volume_data:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chvdisk', {'name': self.name}, [self.old_name])
+ self.changed = True
+            msg = "Volume [{0}] has been successfully renamed to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def apply(self):
+ changed, msg, modify = False, None, {}
+ self.mandatory_parameter_validation()
+ volume_data = self.get_existing_volume(self.name)
+ if self.state == "present" and self.old_name:
+ msg = self.volume_rename(volume_data)
+ elif self.state == "absent" and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ if self.state == 'present':
+ self.assemble_iogrp()
+ if volume_data:
+ self.validate_volume_type(volume_data)
+ if self.state == 'absent':
+ changed = True
+ elif self.state == 'present':
+ if self.type or self.fromsourcevolume:
+ # Both type and fromsourcevolume needed together
+ self.volume_creation_parameter_validation()
+ if ((volume_data[0].get('source_volume_name') and not self.fromsourcevolume) or
+ (volume_data[0].get('volume_type') and not self.type)):
+ changed = True
+
+ modify = self.probe_volume(volume_data)
+ if modify:
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+ if changed:
+ if self.state == 'present':
+ if not volume_data:
+ self.create_volume()
+ if isinstance(self.iogrp, list):
+ if len(self.iogrp) > 1:
+ self.add_iogrp(self.iogrp[1:])
+ msg = 'volume [%s] has been created' % self.name
+ else:
+ if modify:
+ self.update_volume(modify)
+ msg = 'volume [%s] has been modified' % self.name
+ elif self.state == 'absent':
+ self.remove_volume()
+ msg = 'volume [%s] has been deleted.' % self.name
+ else:
+ if self.state == 'absent':
+ msg = "volume [%s] did not exist." % self.name
+ else:
+ msg = "volume [%s] already exists." % self.name
+ if self.module.check_mode:
+ msg = 'Skipping changes due to check mode.'
+ self.log('skipping changes due to check mode.')
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCvolume()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volumegroup.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volumegroup.py
new file mode 100644
index 000000000..75cfb7994
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_manage_volumegroup.py
@@ -0,0 +1,953 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+# Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+# Sumit Kumar Gupta <sumit.gupta16@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_manage_volumegroup
+short_description: This module manages volume groups on IBM Storage Virtualize family systems
+version_added: "1.6.0"
+description:
+ - Ansible interface to manage 'mkvolumegroup', 'chvolumegroup', and 'rmvolumegroup'
+ commands.
+options:
+ name:
+ description:
+ - Specifies the name for the volume group.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or updates (C(present)) or removes (C(absent)) a volume group.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ required: true
+ type: str
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ ownershipgroup:
+ description:
+ - Specifies the name of the ownership group to which the object is being added.
+ - I(ownershipgroup) is mutually exclusive with parameters I(safeguardpolicyname) and I(noownershipgroup).
+ - Applies when I(state=present).
+ type: str
+ noownershipgroup:
+ description:
+ - If specified `True`, the object is removed from the ownership group to which it belongs.
+ - Parameters I(ownershipgroup) and I(noownershipgroup) are mutually exclusive.
+ - Applies when I(state=present) to modify an existing volume group.
+ type: bool
+ safeguardpolicyname:
+ description:
+ - The name of the Safeguarded policy to be assigned to the volume group.
+ - I(safeguardpolicyname) is mutually exclusive with parameters I(nosafeguardpolicy) and I(ownershipgroup).
+ - Applies when I(state=present).
+ type: str
+ nosafeguardpolicy:
+ description:
+ - If specified `True`, removes the Safeguarded policy assigned to the volume group.
+ - Parameters I(safeguardpolicyname) and I(nosafeguardpolicy) are mutually exclusive.
+ - Applies when I(state=present) to modify an existing volume group.
+ type: bool
+ snapshotpolicy:
+ description:
+ - The name of the snapshot policy to be assigned to the volume group.
+ - I(snapshotpolicy) is mutually exclusive with parameters I(nosnapshotpolicy) and I(ownershipgroup).
+ - Applies when I(state=present).
+ type: str
+ version_added: 1.9.0
+ nosnapshotpolicy:
+ description:
+ - If specified `True`, removes the snapshot policy assigned to the volume group.
+ - Parameters I(snapshotpolicy) and I(nosnapshotpolicy) are mutually exclusive.
+ - Applies when I(state=present) to modify an existing volume group.
+ type: bool
+ version_added: 1.9.0
+ snapshotpolicysuspended:
+ description:
+ - Specifies whether to suspend (C(yes)) or resume (C(no)) the snapshot policy on this volume group.
+ - Applies when I(state=present) to modify an existing volume group.
+ choices: [ 'yes', 'no' ]
+ type: str
+ version_added: 1.9.0
+ policystarttime:
+ description:
+ - Specifies the time when the first Safeguarded backup is to be taken.
+ - This parameter can also be associated with snapshot policy.
+ - I(safeguardpolicyname) is required when using I(policystarttime).
+ - The accepted format is YYMMDDHHMM.
+ - Applies when I(state=present).
+ type: str
+ type:
+ description:
+ - Specifies the type of volume group to be created from the snapshot.
+ - Valid during creation of host accessible volume group from an existing snapshot.
+ choices: [ clone, thinclone ]
+ type: str
+ version_added: 1.9.0
+ snapshot:
+ description:
+ - Specifies the name of the snapshot used to prepopulate the new volumes in the new volume group.
+ - Required when creating a host accessible volume group from an existing snapshot.
+ type: str
+ version_added: 1.9.0
+ fromsourcegroup:
+ description:
+ - Specifies the parent volume group of the snapshot. This is used to prepopulate the new volume in the
+ new volume group.
+ - Valid during creation of host accessible volume group from an existing snapshot.
+ type: str
+ version_added: 1.9.0
+ pool:
+ description:
+ - Specifies the pool name where the target volumes are to be created.
+ - Valid during creation of host accessible volume group from an existing snapshot.
+ type: str
+ version_added: 1.9.0
+ iogrp:
+ description:
+ - Specifies the I/O group for new volumes.
+ - Valid during creation of host accessible volume group from an existing snapshot.
+ type: str
+ version_added: 1.9.0
+ safeguarded:
+ description:
+ - If specified, the snapshot policy creates safeguarded snapshots.
+ - Should be specified along with I(snapshotpolicy).
+ - Valid during creation and update of a volume group.
+ - Supported from Storage Virtualize family systems 8.5.2.0 or later.
+ default: false
+ type: bool
+ version_added: 1.10.0
+ ignoreuserfcmaps:
+ description:
+ - Allows the user to create snapshots through the scheduler or manually with `addsnapshot`.
+ This can only be used if a volume in the volume group is used as a source of a user legacy
+ FlashCopy mapping.
+ - Valid during creation and update of a volume group.
+ - Supported from Storage Virtualize family systems 8.5.2.0 or later.
+ choices: [ 'yes', 'no' ]
+ type: str
+ version_added: 1.10.0
+ replicationpolicy:
+ description:
+ - Specifies the name of the replication policy to be assigned to the volume group.
+ - Applies when I(state=present).
+ - Supported from Storage Virtualize family systems 8.5.2.1 or later.
+ type: str
+ version_added: 1.10.0
+ noreplicationpolicy:
+ description:
+ - If specified `True`, removes the replication policy assigned to the volume group.
+ - Parameters I(replicationpolicy) and I(noreplicationpolicy) are mutually exclusive.
+ - Applies when I(state=present) to modify an existing volume group.
+ - Supported from Storage Virtualize family systems 8.5.2.1 or later.
+ type: bool
+ version_added: 1.10.0
+ old_name:
+ description:
+ - Specifies the old name of the volume group during renaming.
+ - Valid when I(state=present), to rename an existing volume group.
+ type: str
+ version_added: '2.0.0'
+ partition:
+ description:
+ - Specifies the name of the storage partition to be assigned to the volume group.
+ - Applies when I(state=present).
+ - Supported from Storage Virtualize family systems 8.6.1.0 or later.
+ type: str
+ version_added: 2.1.0
+ nopartition:
+ description:
+ - If specified `True`, removes the volume group from the storage partition.
+ - Parameters I(partition) and I(nopartition) are mutually exclusive.
+ - Applies when I(state=present) to modify an existing volume group.
+ - Supported from Storage Virtualize family systems 8.6.1.0 or later.
+ type: bool
+ version_added: 2.1.0
+ evictvolumes:
+ description:
+ - If specified `True`, deletes the volume group but does not remove the volumes.
+ - Applies when I(state=absent) to delete the volume group, keeping the associated volumes.
+ - Supported from Storage Virtualize family systems 8.6.2.0 or later.
+ type: bool
+ version_added: 2.2.0
+ fromsourcevolumes:
+ description:
+ - Specifies a colon-separated list of parent volumes.
+ - When combined with the I(type) parameter and a snapshot, this allows the user to create a volume group from a
+ subset of those volumes whose image is present in the snapshot.
+ - Applies when I(state=present), to create a volume group clone or thinclone from a subset of the volumes of a snapshot.
+ - Supported from Storage Virtualize family systems 8.6.2.0 or later.
+ type: str
+ version_added: 2.3.0
+author:
+ - Shilpi Jain (@Shilpi-J)
+ - Sanjaikumaar M (@sanjaikumaar)
+ - Sumit Kumar Gupta (@sumitguptaibm)
+notes:
+ - This module supports C(check_mode).
+ - Safeguarded policy and snapshot policy cannot be used at the same time.
+ Therefore, the parameters I(snapshotpolicy) and I(safeguardpolicyname) are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: Create a new volume group
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ state: present
+- name: Delete a volume group
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ state: absent
+- name: Update existing volume group to remove ownershipgroup and attach a safeguardpolicy to it
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ state: present
+ noownershipgroup: True
+ safeguardpolicyname: sg1
+- name: Update volumegroup with snapshot policy and remove safeguarded policy
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ nosafeguardpolicy: true
+ snapshotpolicy: sp1
+ state: present
+- name: Update volumegroup with safeguarded snapshot policy and ignoreuserfcmaps
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ safeguarded: true
+ snapshotpolicy: sp1
+ ignoreuserfcmaps: 'yes'
+ state: present
+- name: Suspend snapshot policy in an existing volume group
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ snapshotpolicysuspended: 'yes'
+ state: present
+- name: Create host accessible volume group from an existing snapshot
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: host_accessible_vg
+ type: clone
+ snapshot: snapshot0
+ fromsourcegroup: vg0
+ pool: Pool0
+ state: present
+- name: Create a volumegroup thinclone from a list of volumes
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ type: thinclone
+ fromsourcevolumes: vol1:vol2
+ pool: Pool0
+ state: present
+- name: Create a volumegroup clone from a list of volumes
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ type: clone
+ fromsourcevolumes: vol1:vol2
+ pool: Pool0
+ state: present
+- name: Delete a volume group, keeping volumes which were associated with volumegroup
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ state: absent
+ evictvolumes: true
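+# The following examples are illustrative sketches; vg0_new, sp1 and the start time
+# are placeholder values, not part of the original module documentation.
+- name: Rename an existing volume group from vg0 to vg0_new
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ old_name: vg0
+ name: vg0_new
+ state: present
+- name: Assign a snapshot policy with a start time (YYMMDDHHMM) to an existing volume group
+ ibm.storage_virtualize.ibm_svc_manage_volumegroup:
+ clustername: "{{ clustername }}"
+ domain: "{{ domain }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ log_path: /tmp/playbook.debug
+ name: vg0
+ snapshotpolicy: sp1
+ policystarttime: '2310250900'
+ state: present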
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import \
+ IBMSVCRestApi, svc_argument_spec, get_logger, strtobool
+from ansible.module_utils._text import to_native
+import random
+
+
+class IBMSVCVG(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ ownershipgroup=dict(type='str', required=False),
+ noownershipgroup=dict(type='bool', required=False),
+ safeguardpolicyname=dict(type='str', required=False),
+ nosafeguardpolicy=dict(type='bool', required=False),
+ policystarttime=dict(type='str', required=False),
+ snapshotpolicy=dict(type='str', required=False),
+ nosnapshotpolicy=dict(type='bool', required=False),
+ snapshotpolicysuspended=dict(type='str', choices=['yes', 'no']),
+ type=dict(type='str', choices=['clone', 'thinclone']),
+ snapshot=dict(type='str'),
+ fromsourcegroup=dict(type='str'),
+ fromsourcevolumes=dict(type='str', required=False),
+ pool=dict(type='str'),
+ iogrp=dict(type='str'),
+ safeguarded=dict(type='bool', default=False),
+ ignoreuserfcmaps=dict(type='str', choices=['yes', 'no']),
+ replicationpolicy=dict(type='str'),
+ noreplicationpolicy=dict(type='bool'),
+ old_name=dict(type='str', required=False),
+ partition=dict(type='str'),
+ nopartition=dict(type='bool'),
+ evictvolumes=dict(type='bool')
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.ownershipgroup = self.module.params.get('ownershipgroup', '')
+ self.noownershipgroup = self.module.params.get('noownershipgroup', False)
+ self.policystarttime = self.module.params.get('policystarttime', '')
+ self.snapshotpolicy = self.module.params.get('snapshotpolicy', '')
+ self.nosnapshotpolicy = self.module.params.get('nosnapshotpolicy', False)
+ self.snapshotpolicysuspended = self.module.params.get('snapshotpolicysuspended', '')
+ self.type = self.module.params.get('type', '')
+ self.snapshot = self.module.params.get('snapshot', '')
+ self.fromsourcegroup = self.module.params.get('fromsourcegroup', '')
+ self.fromsourcevolumes = self.module.params.get('fromsourcevolumes', '')
+ self.pool = self.module.params.get('pool', '')
+ self.iogrp = self.module.params.get('iogrp', '')
+ self.safeguardpolicyname = self.module.params.get('safeguardpolicyname', '')
+ self.nosafeguardpolicy = self.module.params.get('nosafeguardpolicy', False)
+ self.safeguarded = self.module.params.get('safeguarded', False)
+ self.ignoreuserfcmaps = self.module.params.get('ignoreuserfcmaps', '')
+ self.replicationpolicy = self.module.params.get('replicationpolicy', '')
+ self.noreplicationpolicy = self.module.params.get('noreplicationpolicy', False)
+ self.old_name = self.module.params.get('old_name', '')
+ self.partition = self.module.params.get('partition', '')
+ self.nopartition = self.module.params.get('nopartition', False)
+ self.evictvolumes = self.module.params.get('evictvolumes', False)
+
+ # Dynamic variable
+ self.parentuid = None
+ self.changed = False
+ self.msg = ''
+
+ self.basic_checks()
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ changed = False
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.state == 'present':
+ if self.policystarttime:
+ if not self.snapshotpolicy and not self.safeguardpolicyname:
+ self.module.fail_json(
+ msg='Either `snapshotpolicy` or `safeguardpolicyname` should be passed along with `policystarttime`.'
+ )
+ if self.safeguarded:
+ if not self.snapshotpolicy:
+ self.module.fail_json(
+ msg='Parameter `safeguarded` should be passed along with `snapshotpolicy`'
+ )
+ if self.evictvolumes is not None:
+ self.module.fail_json(
+ msg='Parameter `evictvolumes` should be passed only while removing a volume group'
+ )
+ elif self.state == 'absent':
+ unwanted = ('ownershipgroup', 'noownershipgroup', 'safeguardpolicyname',
+ 'nosafeguardpolicy', 'snapshotpolicy', 'nosnapshotpolicy',
+ 'policystarttime', 'type', 'fromsourcegroup', 'pool', 'iogrp',
+ 'safeguarded', 'ignoreuserfcmaps', 'replicationpolicy',
+ 'noreplicationpolicy', 'old_name', 'fromsourcevolumes')
+
+ param_exists = ', '.join((param for param in unwanted if getattr(self, param)))
+
+ if param_exists:
+ self.module.fail_json(
+ msg='State=absent but the following parameter(s) exist: {0}'.format(param_exists),
+ changed=changed
+ )
+ else:
+ self.module.fail_json(msg='State should be either present or absent')
+
+ def parameter_handling_while_renaming(self):
+ parameters = {
+ "ownershipgroup": self.ownershipgroup,
+ "noownershipgroup": self.noownershipgroup,
+ "replicationpolicy": self.replicationpolicy,
+ "noreplicationpolicy": self.noreplicationpolicy,
+ "safeguardpolicyname": self.safeguardpolicyname,
+ "nosafeguardpolicy": self.nosafeguardpolicy,
+ "snapshotpolicy": self.snapshotpolicy,
+ "nosnapshotpolicy": self.nosnapshotpolicy,
+ "partition": self.partition,
+ "nopartition": self.nopartition,
+ "fromsourcevolumes": self.fromsourcevolumes
+ }
+ parameters_exists = [parameter for parameter, value in parameters.items() if value]
+ if parameters_exists:
+ self.module.fail_json(msg="Parameters {0} not supported while renaming a volume group.".format(', '.join(parameters_exists)))
+
+ def create_validation(self):
+ mutually_exclusive = (
+ ('ownershipgroup', 'safeguardpolicyname'),
+ ('ownershipgroup', 'snapshotpolicy'),
+ ('ownershipgroup', 'policystarttime'),
+ ('snapshotpolicy', 'safeguardpolicyname'),
+ ('replicationpolicy', 'noreplicationpolicy'),
+ ('partition', 'nopartition')
+ )
+
+ for param1, param2 in mutually_exclusive:
+ if getattr(self, param1) and getattr(self, param2):
+ self.module.fail_json(
+ msg='Mutually exclusive parameters: {0}, {1}'.format(param1, param2)
+ )
+
+ unsupported = ('nosafeguardpolicy', 'noownershipgroup', 'nosnapshotpolicy',
+ 'snapshotpolicysuspended', 'noreplicationpolicy')
+ unsupported_exists = ', '.join((field for field in unsupported if getattr(self, field)))
+
+ if unsupported_exists:
+ self.module.fail_json(
+ msg='Following parameters are not supported during creation: {0}'.format(unsupported_exists)
+ )
+
+ if self.type and not self.snapshot and not self.fromsourcevolumes:
+ self.module.fail_json(
+ msg='type={0} requires either snapshot or fromsourcevolumes parameter'.format(self.type)
+ )
+
+ def update_validation(self, data):
+ mutually_exclusive = (
+ ('ownershipgroup', 'noownershipgroup'),
+ ('safeguardpolicyname', 'nosafeguardpolicy'),
+ ('ownershipgroup', 'safeguardpolicyname'),
+ ('ownershipgroup', 'snapshotpolicy'),
+ ('ownershipgroup', 'policystarttime'),
+ ('nosafeguardpolicy', 'nosnapshotpolicy'),
+ ('snapshotpolicy', 'nosnapshotpolicy'),
+ ('snapshotpolicy', 'safeguardpolicyname'),
+ ('replicationpolicy', 'noreplicationpolicy'),
+ ('partition', 'nopartition')
+ )
+
+ for param1, param2 in mutually_exclusive:
+ if getattr(self, param1) and getattr(self, param2):
+ self.module.fail_json(
+ msg='Mutually exclusive parameters: {0}, {1}'.format(param1, param2)
+ )
+
+ unsupported_maps = (
+ ('type', data.get('volume_group_type', '')),
+ ('snapshot', data.get('source_snapshot', '')),
+ ('fromsourcevolumes', data.get('source_volumes_set', '')),
+ ('fromsourcegroup', data.get('source_volume_group_name', ''))
+ )
+ unsupported = (
+ fields[0] for fields in unsupported_maps if getattr(self, fields[0]) and getattr(self, fields[0]) != fields[1]
+ )
+ unsupported_exists = ', '.join(unsupported)
+
+ if unsupported_exists:
+ self.module.fail_json(
+ msg='Following parameters are not supported during update: {0}'.format(unsupported_exists)
+ )
+
+ def get_existing_vg(self, vg_name):
+ merged_result = {}
+
+ data = self.restapi.svc_obj_info(cmd='lsvolumegroup', cmdopts=None,
+ cmdargs=['-gui', vg_name])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ if merged_result and ((self.snapshotpolicy and self.policystarttime) or self.snapshotpolicysuspended):
+ # Making new call as snapshot_policy_start_time not available in lsvolumegroup CLI
+ SP_data = self.restapi.svc_obj_info(
+ cmd='lsvolumegroupsnapshotpolicy',
+ cmdopts=None,
+ cmdargs=[self.name]
+ )
+ merged_result['snapshot_policy_start_time'] = SP_data['snapshot_policy_start_time']
+ merged_result['snapshot_policy_suspended'] = SP_data['snapshot_policy_suspended']
+
+ # Make a new call as the volume list is not present in lsvolumegroup CLI output.
+ # If the existing volumegroup is a thinclone but the command params don't contain
+ # [type], that is also considered an attempt to create/change an already
+ # existing volume group. So, it should be recorded to throw an error later.
+ is_existing_vg_thinclone = False
+ if merged_result and 'volume_group_type' in merged_result and merged_result['volume_group_type'] == 'thinclone':
+ is_existing_vg_thinclone = True
+ if merged_result and ((self.type and self.fromsourcevolumes) or
+ is_existing_vg_thinclone):
+ volumes_data = []
+ if self.type == "thinclone" or is_existing_vg_thinclone is True:
+ cmd = 'lsvolumepopulation'
+ cmdopts = {"filtervalue": "volume_group_name={0}".format(self.name)}
+ volumes_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs=None)
+ else:
+ # Source volumes for clone volumes need to be fetched for verification
+ # 1. First get the volumes associated with volumegroup provided
+ associated_volumes_data = []
+ cmd = 'lsvdisk'
+ cmdopts = {"filtervalue": "volume_group_name={0}".format(self.name)}
+ associated_volumes_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs=None)
+ vol_names = set()
+ for vol in associated_volumes_data:
+ vol_names.add(vol['name'])
+
+ # 2. Run lsvdisk for each volume provided in command to get source_volume_name
+ for volname in vol_names:
+ cmd = 'lsvdisk' + "/" + volname
+ cmdopts = None
+ cmdargs = None
+ single_vol_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs=None)
+ if single_vol_data:
+ volumes_data.append(single_vol_data[0])
+
+ # Make a set from source volumes of all volumes
+ if volumes_data:
+ source_volumes_set = set()
+ source_volumes_pool_set = set()
+ for volume_data in volumes_data:
+ # Add the value of 'source_volume_name' to the merged_result
+ source_volumes_set.add(volume_data['source_volume_name'])
+ merged_result['source_volumes_set'] = source_volumes_set
+ # If pool is provided, verify that pool matches with the one provided in command
+ if self.pool:
+ cmd = 'lsvdisk'
+ cmdopts = {"filtervalue": "parent_mdisk_grp_name={0}".format(self.pool)}
+
+ vdisks_data = self.restapi.svc_obj_info(cmd, cmdopts, cmdargs=None)
+ remaining_vdisks = len(source_volumes_set)
+ for vdisk_data in vdisks_data:
+ if vdisk_data['name'] in source_volumes_set:
+ source_volumes_pool_set.add(vdisk_data['parent_mdisk_grp_name'])
+ remaining_vdisks = remaining_vdisks - 1
+ if remaining_vdisks == 0:
+ break
+
+ merged_result['source_volumes_pool_set'] = source_volumes_pool_set
+
+ return merged_result
+
+ def set_parentuid(self):
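+ # When only a snapshot name is given (no fromsourcegroup), locate the orphan
+ # snapshot (the one with an empty volume_group_name) and record its parent_uid.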
+ if self.snapshot and not self.fromsourcegroup:
+ cmdopts = {
+ "filtervalue": "snapshot_name={0}".format(self.snapshot)
+ }
+ data = self.restapi.svc_obj_info(
+ cmd='lsvolumesnapshot',
+ cmdopts=cmdopts,
+ cmdargs=None
+ )
+ try:
+ result = next(
+ filter(
+ lambda obj: obj['volume_group_name'] == '',
+ data
+ )
+ )
+ except StopIteration:
+ self.module.fail_json(
+ msg='Orphan snapshot ({0}) does not exist for the given name'.format(self.snapshot)
+ )
+ else:
+ self.parentuid = result['parent_uid']
+
+ def vg_probe(self, data):
+ self.update_validation(data)
+ # Mapping the parameters with the existing data for comparison
+ params_mapping = (
+ ('ownershipgroup', data.get('owner_name', '')),
+ ('ignoreuserfcmaps', data.get('ignore_user_flash_copy_maps', '')),
+ ('replicationpolicy', data.get('replication_policy_name', '')),
+ ('noownershipgroup', not bool(data.get('owner_name', ''))),
+ ('nosafeguardpolicy', not bool(data.get('safeguarded_policy_name', ''))),
+ ('nosnapshotpolicy', not bool(data.get('snapshot_policy_name', ''))),
+ ('noreplicationpolicy', not bool(data.get('replication_policy_name', ''))),
+ ('partition', data.get('partition_name', '')),
+ ('nopartition', not bool(data.get('partition_name', '')))
+ )
+
+ props = dict((k, getattr(self, k)) for k, v in params_mapping if getattr(self, k) and getattr(self, k) != v)
+
+ if self.safeguardpolicyname and self.safeguardpolicyname != data.get('safeguarded_policy_name', ''):
+ props['safeguardedpolicy'] = self.safeguardpolicyname
+ # If policy is changed, existing policystarttime will be erased so adding time without any check
+ if self.policystarttime:
+ props['policystarttime'] = self.policystarttime
+ elif self.safeguardpolicyname:
+ if self.policystarttime and self.policystarttime + '00' != data.get('safeguarded_policy_start_time', ''):
+ props['safeguardedpolicy'] = self.safeguardpolicyname
+ props['policystarttime'] = self.policystarttime
+ elif self.snapshotpolicy and self.snapshotpolicy != data.get('snapshot_policy_name', ''):
+ props['snapshotpolicy'] = self.snapshotpolicy
+ props['safeguarded'] = self.safeguarded
+ if self.policystarttime:
+ props['policystarttime'] = self.policystarttime
+ elif self.snapshotpolicy:
+ if self.policystarttime and self.policystarttime + '00' != data.get('snapshot_policy_start_time', ''):
+ props['snapshotpolicy'] = self.snapshotpolicy
+ props['policystarttime'] = self.policystarttime
+ if self.safeguarded not in ('', None) and self.safeguarded != strtobool(data.get('snapshot_policy_safeguarded', 0)):
+ props['snapshotpolicy'] = self.snapshotpolicy
+ props['safeguarded'] = self.safeguarded
+
+ # Adding snapshotpolicysuspended to props
+ if self.snapshotpolicysuspended and self.snapshotpolicysuspended != data.get('snapshot_policy_suspended', ''):
+ props['snapshotpolicysuspended'] = self.snapshotpolicysuspended
+
+ self.log("volumegroup props = %s", props)
+
+ return props
+
+ def create_transient_snapshot(self):
+ # Required parameters
+ snapshot_cmd = 'addsnapshot'
+ snapshot_opts = {}
+ random_number = ''.join(random.choices('0123456789', k=10))
+ snapshot_name = f"snapshot_{random_number}"
+ snapshot_opts['name'] = snapshot_name
+
+ # Optional parameters
+ snapshot_opts['pool'] = self.module.params.get('pool', '')
+ snapshot_opts['volumes'] = self.module.params.get('fromsourcevolumes', '')
+ snapshot_opts['retentionminutes'] = 5
+
+ self.restapi.svc_run_command(snapshot_cmd, snapshot_opts, cmdargs=None, timeout=10)
+ return snapshot_name
+
+ def vg_create(self):
+ self.create_validation()
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("creating volume group '%s'", self.name)
+
+ # Make command
+ cmd = 'mkvolumegroup'
+ cmdopts = {
+ 'name': self.name,
+ 'safeguarded': self.safeguarded
+ }
+
+ if self.type:
+ optional_params = ('type', 'snapshot', 'pool')
+ cmdopts.update(
+ dict(
+ (param, getattr(self, param)) for param in optional_params if getattr(self, param)
+ )
+ )
+ if self.iogrp:
+ cmdopts['iogroup'] = self.iogrp
+
+ if self.fromsourcevolumes:
+ cmdopts['fromsourcevolumes'] = self.fromsourcevolumes
+ if not self.snapshot:
+ # If thinclone or clone is to be created from volumes, do following:
+ # 1. Create transient snapshot with 5-min retentionminutes
+ # 2. Create a thinclone volumegroup from this snapshot
+ # 3. There is no need to delete snapshot, as it is auto-managed due to retentionminutes
+ try:
+ self.snapshot = self.create_transient_snapshot()
+ cmdopts['snapshot'] = self.snapshot
+ except Exception as e:
+ self.log('Exception in creating transient snapshot: %s', format_exc())
+ self.module.fail_json(msg='Module failed. Error [%s].' % to_native(e))
+ self.set_parentuid()
+ if self.parentuid:
+ cmdopts['fromsourceuid'] = self.parentuid
+ elif self.fromsourcegroup:
+ cmdopts['fromsourcegroup'] = self.fromsourcegroup
+
+ if self.ignoreuserfcmaps:
+ if self.ignoreuserfcmaps == 'yes':
+ cmdopts['ignoreuserfcmaps'] = True
+ else:
+ cmdopts['ignoreuserfcmaps'] = False
+
+ if self.replicationpolicy:
+ cmdopts['replicationpolicy'] = self.replicationpolicy
+
+ if self.partition:
+ cmdopts['partition'] = self.partition
+
+ if self.ownershipgroup:
+ cmdopts['ownershipgroup'] = self.ownershipgroup
+ elif self.safeguardpolicyname:
+ cmdopts['safeguardedpolicy'] = self.safeguardpolicyname
+ if self.policystarttime:
+ cmdopts['policystarttime'] = self.policystarttime
+ elif self.snapshotpolicy:
+ cmdopts['snapshotpolicy'] = self.snapshotpolicy
+ if self.policystarttime:
+ cmdopts['policystarttime'] = self.policystarttime
+
+ self.log("creating volumegroup '%s'", cmdopts)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("create volume group result %s", result)
+ # Any error would have been raised in svc_run_command
+ self.changed = True
+
+ def vg_update(self, modify):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ # update the volume group
+ self.log("updating volume group '%s' ", self.name)
+ cmdargs = [self.name]
+
+ try:
+ del modify['snapshotpolicysuspended']
+ except KeyError:
+ self.log("snapshotpolicysuspended modification not reqiured!!")
+ else:
+ cmd = 'chvolumegroupsnapshotpolicy'
+ cmdopts = {'snapshotpolicysuspended': self.snapshotpolicysuspended}
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ cmd = 'chvolumegroup'
+ unmaps = ('noownershipgroup', 'nosafeguardpolicy', 'nosnapshotpolicy', 'noreplicationpolicy')
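+ # Each unset option below is issued as its own chvolumegroup call (note the CLI
+ # spells the unset flag 'nosafeguardedpolicy'); remaining options are applied together.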
+ for field in unmaps:
+ cmdopts = {}
+ if field == 'nosafeguardpolicy' and field in modify:
+ cmdopts['nosafeguardedpolicy'] = modify.pop('nosafeguardpolicy')
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ elif field in modify:
+ cmdopts[field] = modify.pop(field)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ if modify:
+ cmdopts = modify
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ # Any error would have been raised in svc_run_command
+ self.changed = True
+
+ def vg_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting volume group '%s'", self.name)
+
+ cmd = 'rmvolumegroup'
+ cmdopts = {}
+ cmdargs = [self.name]
+ if self.evictvolumes is not None:
+ cmdopts['evictvolumes'] = self.evictvolumes
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ # Any error will have been raised in svc_run_command
+ self.changed = True
+
+ def vg_rename(self, vg_data):
+ msg = ''
+ self.parameter_handling_while_renaming()
+ old_vg_data = self.get_existing_vg(self.old_name)
+
+ if not old_vg_data and not vg_data:
+ self.module.fail_json(msg="Volume group with old name {0} doesn't exist.".format(self.old_name))
+ elif old_vg_data and vg_data:
+ self.module.fail_json(msg="Volume group [{0}] already exists.".format(self.name))
+ elif not old_vg_data and vg_data:
+ msg = "Volume group with name [{0}] already exists.".format(self.name)
+ elif old_vg_data and not vg_data:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chvolumegroup', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "Volume group [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def apply(self):
+ vg_data = self.get_existing_vg(self.name)
+
+ if self.state == 'present' and self.old_name:
+ self.msg = self.vg_rename(vg_data)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ if vg_data:
+ if self.state == 'present':
+ is_existing_vg_thinclone = False
+ if vg_data.get('volume_group_type') == 'thinclone':
+ is_existing_vg_thinclone = True
+ if (self.type and self.fromsourcevolumes) or is_existing_vg_thinclone is True:
+ # Check whether provided source volumes are same as in existing volumegroup
+ volumes_with_existing_vg = None
+ if 'source_volumes_set' in vg_data:
+ volumes_with_existing_vg = vg_data['source_volumes_set']
+ provided_volumes_set = set()
+ if self.fromsourcevolumes:
+ provided_volumes_set = set(self.fromsourcevolumes.split(":"))
+ if volumes_with_existing_vg or provided_volumes_set:
+ self.changed = False
+ if not provided_volumes_set and volumes_with_existing_vg:
+ self.module.fail_json(
+ msg="Existing thinclone volumegroup found.",
+ changed=self.changed
+ )
+ if volumes_with_existing_vg != provided_volumes_set:
+ self.module.fail_json(
+ msg="Parameter [fromsourcevolumes] is invalid for modifying volumegroup.",
+ changed=self.changed
+ )
+ elif self.pool and vg_data['source_volumes_pool_set'] and (list(vg_data['source_volumes_pool_set'])[0] != self.pool):
+ self.module.fail_json(
+ msg="Parameter [pool] is invalid for modifying volumegroup.",
+ changed=self.changed
+ )
+ else:
+ self.msg = "A volumegroup with name [%s] already exists." % self.name
+ else:
+ modify = self.vg_probe(vg_data)
+ if modify:
+ self.vg_update(modify)
+ self.msg = "volume group [%s] has been modified." % self.name
+ else:
+ self.msg = "No Modifications detected, Volume group already exists."
+ else:
+ self.vg_delete()
+ self.msg = "volume group [%s] has been deleted." % self.name
+ else:
+ if self.state == 'absent':
+ self.msg = "Volume group [%s] does not exist." % self.name
+ else:
+ self.vg_create()
+ self.msg = "volume group [%s] has been created." % self.name
+
+ if self.module.check_mode:
+ self.msg = 'skipping changes due to check mode.'
+
+ self.module.exit_json(msg=self.msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCVG()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdisk.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdisk.py
new file mode 100644
index 000000000..a8ab0820e
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdisk.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_mdisk
+short_description: This module manages MDisks on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkarray' and 'rmmdisk' MDisk commands.
+version_added: "1.0.0"
+options:
+ name:
+ description:
+ - The MDisk name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) the MDisk.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ version_added: '1.5.0'
+ drive:
+ description:
+ - Drive(s) to use as members of the RAID array.
+ - Required when I(state=present), to create an MDisk array.
+ type: str
+ mdiskgrp:
+ description:
+ - The storage pool (mdiskgrp) to which you want to add the MDisk.
+ type: str
+ required: true
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certificates.
+ default: false
+ type: bool
+ level:
+ description:
+ - Specifies the RAID level.
+ - Required when I(state=present), to create an MDisk array.
+ type: str
+ choices: ['raid0', 'raid1', 'raid5', 'raid6', 'raid10']
+ encrypt:
+ description:
+ - Defines use of encryption with the MDisk group.
+ - Applies when I(state=present).
+ type: str
+ default: 'no'
+ choices: ['yes', 'no']
+ driveclass:
+ description:
+ - Specifies the class that is being used to create the array.
+ - Applies when I(state=present).
+ type: str
+ version_added: '2.0.0'
+ drivecount:
+ description:
+ - Specifies the number of drives.
+ - The value must be a number in the range 2 - 128.
+ - Applies when I(state=present).
+ type: str
+ version_added: '2.0.0'
+ stripewidth:
+ description:
+ - Specifies the width of a single unit of redundancy within a distributed set of drives.
+ - The value must be a number in the range 2 - 16.
+ - Applies when I(state=present).
+ type: str
+ version_added: '2.0.0'
+ old_name:
+ description:
+ - Specifies the old name of an existing pool.
+ - Applies when I(state=present), to rename the existing pool.
+ type: str
+ version_added: '2.0.0'
+author:
+ - Peng Wang (@wangpww)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create MDisk and name as mdisk20
+ ibm.storage_virtualize.ibm_svc_mdisk:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: mdisk20
+ state: present
+ level: raid0
+ drive: '5:6'
+ encrypt: 'no'
+ mdiskgrp: pool20
+- name: Delete MDisk named mdisk20
+ ibm.storage_virtualize.ibm_svc_mdisk:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: mdisk20
+ state: absent
+ mdiskgrp: pool20
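+# The following examples are illustrative sketches; the driveclass, drivecount and
+# stripewidth values and the name mdisk20_new are placeholders, not part of the
+# original module documentation.
+- name: Create a distributed RAID (DRAID) array from a drive class
+ ibm.storage_virtualize.ibm_svc_mdisk:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: mdisk21
+ state: present
+ level: raid6
+ driveclass: '0'
+ drivecount: '6'
+ stripewidth: '5'
+ mdiskgrp: pool20
+- name: Rename an existing MDisk from mdisk20 to mdisk20_new
+ ibm.storage_virtualize.ibm_svc_mdisk:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ old_name: mdisk20
+ name: mdisk20_new
+ state: present
+ mdiskgrp: pool20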
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+
+
+class IBMSVCmdisk(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ level=dict(type='str', choices=['raid0', 'raid1', 'raid5',
+ 'raid6', 'raid10']),
+ drive=dict(type='str', default=None),
+ encrypt=dict(type='str', default='no', choices=['yes', 'no']),
+ mdiskgrp=dict(type='str', required=True),
+ driveclass=dict(type='str'),
+ drivecount=dict(type='str'),
+ stripewidth=dict(type='str'),
+ old_name=dict(type='str')
+ )
+ )
+
+ mutually_exclusive = []
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.level = self.module.params.get('level', None)
+ self.drive = self.module.params.get('drive', None)
+ self.encrypt = self.module.params.get('encrypt', None)
+ self.mdiskgrp = self.module.params.get('mdiskgrp', None)
+ self.driveclass = self.module.params.get('driveclass', '')
+ self.drivecount = self.module.params.get('drivecount', '')
+ self.stripewidth = self.module.params.get('stripewidth', '')
+ self.old_name = self.module.params.get('old_name', '')
+
+ # internal variable
+ self.changed = False
+
+ self.basic_checks()
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ # Handling missing mandatory parameters name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.state == 'present':
+ if self.drive and (self.drivecount or self.driveclass or self.stripewidth):
+ self.module.fail_json(msg="The parameters 'drive' and "
+ "'driveclass, drivecount, stripewidth' are mutually exclusive.")
+ elif self.state == 'absent':
+ invalids = ('drive', 'driveclass', 'level', 'drivecount', 'old_name', 'stripewidth')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Following parameters are not applicable while deleting: {0}'.format(invalid_exists))
+
+ def mdisk_exists(self, name):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(
+ cmd='lsmdisk',
+ cmdopts={},
+ cmdargs=['-gui', name]
+ )
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def mdisk_rename(self, mdisk_data):
+ msg = None
+ old_mdisk_data = self.mdisk_exists(self.old_name)
+ if not old_mdisk_data and not mdisk_data:
+ self.module.fail_json(msg="mdisk [{0}] does not exists.".format(self.old_name))
+ elif old_mdisk_data and mdisk_data:
+ self.module.fail_json(msg="mdisk with name [{0}] already exists.".format(self.name))
+ elif not old_mdisk_data and mdisk_data:
+ msg = "mdisk [{0}] already renamed.".format(self.name)
+ elif old_mdisk_data and not mdisk_data:
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chmdisk', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "mdisk [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def mdisk_create(self):
+ if self.drive:
+ if self.drivecount or self.driveclass or self.stripewidth:
+ self.module.fail_json(msg="The parameters 'drive' and "
+ "'driveclass, drivecount, stripewidth' are mutually exclusive.")
+ elif self.drivecount and self.driveclass:
+ if self.drivecount and not (2 <= int(self.drivecount) <= 128):
+ self.module.fail_json(msg="You must pass drivecount value in the range 2 - 128 only.")
+
+ if self.stripewidth and not (2 <= int(self.stripewidth) <= 16):
+ self.module.fail_json(msg="You must pass stripewidth value in the range 2 - 16 only.")
+ else:
+ self.module.fail_json(msg="You must pass any one of the following two: "
+ "(1) 'drive' for RAID array "
+ "(2) 'driveclass and drivecount' for DRAID array.")
+
+ if not self.level:
+ self.module.fail_json(msg="You must pass in level to the module.")
+ if not self.mdiskgrp:
+ self.module.fail_json(msg="You must pass in "
+ "mdiskgrp to the module.")
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("creating mdisk '%s'", self.name)
+
+ # Make command
+ cmdopts = {}
+ if self.drive:
+ cmd = 'mkarray'
+ cmdopts['drive'] = self.drive
+ elif self.driveclass and self.drivecount:
+ cmd = 'mkdistributedarray'
+ cmdopts['driveclass'] = self.driveclass
+ cmdopts['drivecount'] = self.drivecount
+ cmdopts['allowsuperior'] = True
+ if self.stripewidth:
+ cmdopts['stripewidth'] = self.stripewidth
+
+ if self.encrypt:
+ cmdopts['encrypt'] = self.encrypt
+
+ cmdopts['level'] = self.level
+ cmdopts['strip'] = 256
+ cmdopts['name'] = self.name
+ cmdargs = [self.mdiskgrp]
+ self.log("creating mdisk command=%s opts=%s args=%s",
+ cmd, cmdopts, cmdargs)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log("create mdisk result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create mdisk result message %s", result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create mdisk [%s]" % self.name)
+
+ def mdisk_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting mdisk '%s'", self.name)
+ cmd = 'rmmdisk'
+ cmdopts = {}
+ cmdopts['mdisk'] = self.name
+ cmdargs = [self.mdiskgrp]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+ # rmmdisk does not output anything when successful.
+ self.changed = True
+
+ def mdisk_update(self, modify):
+ # update the mdisk
+ self.log("updating mdisk '%s'", self.name)
+
+ # cmd = 'chmdisk'
+ # cmdopts = {}
+ # chmdisk does not like mdisk arrays.
+ # cmdargs = [self.name]
+
+ # TBD: Implement changed logic.
+ # result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+ # chmdisk does not output anything when successful.
+ self.changed = True
+
+ # TBD: Implement a more generic way to check for properties to modify.
+ def mdisk_probe(self, data):
+ ns = []
+
+ field_mappings = (
+ ('drivecount', data['drive_count']),
+ ('level', data['raid_level']),
+ ('encrypt', data['encrypt'])
+ )
+
+ for field, existing_value in field_mappings:
+ ns.append(existing_value != getattr(self, field))
+
+ self.log("mdisk_probe props='%s'", ns)
+ return ns
+
+ def apply(self):
+ changed = False
+ msg = None
+ modify = []
+
+ mdisk_data = self.mdisk_exists(self.name)
+ if self.state == 'present' and self.old_name:
+ msg = self.mdisk_rename(mdisk_data)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+ else:
+ if mdisk_data:
+ if self.state == 'absent':
+ self.log("CHANGED: mdisk exists, but "
+ "requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # This is where we detect if chmdisk should be called.
+ modify = self.mdisk_probe(mdisk_data)
+ if any(modify):
+ self.log("Modification is not supported")
+ else:
+ if self.state == 'present':
+ self.log("CHANGED: mdisk does not exist, "
+ "but requested state is 'present'")
+ changed = True
+
+ if changed:
+ if self.state == 'present':
+ if not mdisk_data:
+ self.mdisk_create()
+ self.changed = True
+ msg = "Mdisk [%s] has been created." % self.name
+ else:
+ # This is where we would modify
+ self.mdisk_update(modify)
+ msg = "Mdisk [%s] has been modified." % self.name
+ self.changed = True
+ elif self.state == 'absent':
+ self.mdisk_delete()
+ msg = "Volume [%s] has been deleted." % self.name
+ self.changed = True
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "Mdisk [%s] did not exist." % self.name
+ else:
+ msg = "Mdisk [%s] already exists. No modifications done" % self.name
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode'
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCmdisk()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdiskgrp.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdiskgrp.py
new file mode 100644
index 000000000..997ea1eca
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_mdiskgrp.py
@@ -0,0 +1,674 @@
+#!/usr/bin/python
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_mdiskgrp
+short_description: This module manages pools on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'mkmdiskgrp' and 'rmmdiskgrp' pool commands.
+version_added: "1.0.0"
+options:
+ name:
+ description:
+ - Specifies the name to assign to the new pool.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) an MDisk group.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the M(ibm.storage_virtualize.ibm_svc_auth) module.
+ type: str
+ version_added: '1.5.0'
+ datareduction:
+ description:
+ - Defines use of data reduction pools (DRPs) on the MDisk group.
+ - Applies when I(state=present), to create a pool.
+ type: str
+ default: 'no'
+ choices: ['yes', 'no']
+ easytier:
+ description:
+ - Defines use of easytier with the MDisk group.
+ - Applies when I(state=present), to create a pool.
+ type: str
+ default: 'off'
+ choices: ['on', 'off', 'auto']
+ encrypt:
+ description:
+ - Defines use of encryption with the MDisk group.
+ - Applies when I(state=present), to create a pool.
+ type: str
+ default: 'no'
+ choices: ['yes', 'no']
+ ext:
+ description:
+ - Specifies the size of the extents for this group in MB.
+ - Applies when I(state=present), to create a pool.
+ type: int
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ parentmdiskgrp:
+ description:
+ - Specifies the parent pool (parentmdiskgrp) in which the child pool (subpool) is created.
+ - Applies when I(state=present), to create a pool.
+ type: str
+ safeguarded:
+ description:
+ - Specify to create a safeguarded child pool.
+ - Applicable only during child pool creation.
+ type: bool
+ version_added: 1.8.0
+ noquota:
+ description:
+ - Specify to create a data reduction child pool.
+ - I(noquota) and I(size) parameters are mutually exclusive.
+ - I(noquota) parameter must be used with I(datareduction) set to yes to create a data reduction child pool.
+ - I(noquota) parameter must be used with I(parentmdiskgrp) in a parent data reduction storage pool.
+ type: bool
+ version_added: 1.8.0
+ unit:
+ description:
+ - Specifies the data unit for the child pool I(size).
+ - Applies when I(state=present), to create a pool.
+ type: str
+ provisioningpolicy:
+ description:
+ - Specifies the name of the provisioning policy to map to the pool.
+ - Applies, when I(state=present).
+ type: str
+ version_added: 1.10.0
+ noprovisioningpolicy:
+ description:
+ - Specify to unmap provisioning policy from the pool.
+ - Applies, when I(state=present) to modify an existing pool.
+ type: bool
+ version_added: 1.10.0
+ replicationpoollinkuid:
+ description:
+ - Specifies the replication pool unique identifier, which should be the same as that of the corresponding pool on the replication server.
+ - Applies, when I(state=present).
+ - Supported in SV build 8.5.2.1 or later.
+ type: str
+ version_added: 1.10.0
+ resetreplicationpoollinkuid:
+ description:
+ - If set, any links between this pool on the local system and pools on remote systems will be removed.
+ - Applies, when I(state=present) to modify an existing pool.
+ - Supported in SV build 8.5.2.1 or later.
+ type: bool
+ version_added: 1.10.0
+ replication_partner_clusterid:
+ description:
+ - Specifies the id or name of the partner cluster which will be used for replication.
+ - Applies, when I(state=present).
+ - Supported in SV build 8.5.2.1 or later.
+ type: str
+ version_added: 1.10.0
+ size:
+ description:
+ - Specifies the child pool capacity. The value must be
+ a numeric value (and an integer multiple of the extent size).
+ - Applies when I(state=present), to create a pool.
+ type: int
+ warning:
+ description:
+ - If specified, generates a warning when the used disk capacity in the storage pool first exceeds the specified threshold.
+ - The default value is 80. To disable it, specify the value as 0.
+ - Applies when I(state=present) while creating the pool.
+ type: int
+ version_added: '1.12.0'
+ ownershipgroup:
+ description:
+ - Specifies the name of the ownershipgroup to map it with the pool.
+ - Applies when I(state=present).
+ type: str
+ version_added: '1.12.0'
+ noownershipgroup:
+ description:
+ - Specifies to unmap ownershipgroup from the pool.
+ - Applies when I(state=present) to modify an existing pool.
+ type: bool
+ version_added: '1.12.0'
+ vdiskprotectionenabled:
+ description:
+ - Specifies whether volume protection is enabled for this storage pool. The default value is 'yes'.
+ - Applies when I(state=present).
+ type: str
+ choices: ['yes', 'no']
+ version_added: '1.12.0'
+ etfcmoverallocationmax:
+ description:
+ - Specifies the maximum over allocation which Easy Tier can migrate onto FlashCore Module arrays, when the array is used as the top
+ tier in a multitier pool. The value acts as a multiplier of the physically available space.
+ - The allowed values are a percentage in the range of 100% (default) to 400% or off. Setting the value to off disables this feature.
+ - Applies when I(state=present).
+ type: str
+ version_added: '1.12.0'
+ old_name:
+ description:
+ - Specifies the old name of an existing pool.
+ - Applies when I(state=present), to rename the existing pool.
+ type: str
+ version_added: '1.12.0'
+
+author:
+ - Peng Wang (@wangpww)
+ - Sanjaikumaar M (@sanjaikumaar)
+ - Lavanya C R (@lavanya)
+notes:
+ - This module supports C(check_mode).
+'''
+EXAMPLES = '''
+- name: Create mdisk group
+ ibm.storage_virtualize.ibm_svc_mdiskgrp:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: pool1
+ provisioningpolicy: pp0
+ replicationpoollinkuid: '000000000000000'
+ replication_partner_clusterid: '000000000032432342'
+ etfcmoverallocationmax: '120'
+ state: present
+ datareduction: 'no'
+ easytier: auto
+ encrypt: 'no'
+ ext: 1024
+- name: Create childpool with ownershipgroup
+ ibm.storage_virtualize.ibm_svc_mdiskgrp:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: childpool0
+ ownershipgroup: owner0
+ parentmdiskgrp: pool1
+ state: present
+ datareduction: 'no'
+ easytier: auto
+ encrypt: 'no'
+ ext: 1024
+- name: Create a safeguarded backup location
+ ibm.storage_virtualize.ibm_svc_mdiskgrp:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ log_path: "{{log_path}}"
+ parentmdiskgrp: Pool1
+ name: Pool1child1
+ datareduction: 'yes'
+ safeguarded: True
+ ext: 1024
+ noquota: True
+ state: present
+- name: Delete mdisk group
+ ibm.storage_virtualize.ibm_svc_mdiskgrp:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ name: pool1
+ state: absent
+- name: Delete a safeguarded backup location
+ ibm.storage_virtualize.ibm_svc_mdiskgrp:
+ clustername: "{{clustername}}"
+ token: "{{results.token}}"
+ log_path: "{{log_path}}"
+ parentmdiskgrp: Pool1
+ name: Pool1child1
+ state: absent
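+# Illustrative sketch; pool1_new is a placeholder name, not part of the original
+# module documentation.
+- name: Rename an existing pool from pool1 to pool1_new
+ ibm.storage_virtualize.ibm_svc_mdiskgrp:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ old_name: pool1
+ name: pool1_new
+ state: present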
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+
+
+class IBMSVCmdiskgrp(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ datareduction=dict(type='str', default='no', choices=['yes',
+ 'no']),
+ easytier=dict(type='str', default='off', choices=['on', 'off',
+ 'auto']),
+ encrypt=dict(type='str', default='no', choices=['yes', 'no']),
+ ext=dict(type='int'),
+ parentmdiskgrp=dict(type='str'),
+ safeguarded=dict(type='bool'),
+ noquota=dict(type='bool'),
+ size=dict(type='int'),
+ unit=dict(type='str'),
+ provisioningpolicy=dict(type='str'),
+ noprovisioningpolicy=dict(type='bool'),
+ replicationpoollinkuid=dict(type='str'),
+ resetreplicationpoollinkuid=dict(type='bool'),
+ replication_partner_clusterid=dict(type='str'),
+ warning=dict(type='int'),
+ vdiskprotectionenabled=dict(type='str', choices=['yes', 'no']),
+ ownershipgroup=dict(type='str'),
+ noownershipgroup=dict(type='bool'),
+ etfcmoverallocationmax=dict(type='str'),
+ old_name=dict(type='str')
+ )
+ )
+
+ mutually_exclusive = []
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.datareduction = self.module.params.get('datareduction', None)
+ self.easytier = self.module.params.get('easytier', None)
+ self.encrypt = self.module.params.get('encrypt', None)
+ self.ext = self.module.params.get('ext', None)
+ self.safeguarded = self.module.params.get('safeguarded', False)
+ self.noquota = self.module.params.get('noquota', False)
+ self.provisioningpolicy = self.module.params.get('provisioningpolicy', '')
+ self.noprovisioningpolicy = self.module.params.get('noprovisioningpolicy', False)
+ self.replicationpoollinkuid = self.module.params.get('replicationpoollinkuid', '')
+ self.resetreplicationpoollinkuid = self.module.params.get('resetreplicationpoollinkuid', False)
+ self.replication_partner_clusterid = self.module.params.get('replication_partner_clusterid', '')
+ self.warning = self.module.params.get('warning', None)
+ self.ownershipgroup = self.module.params.get('ownershipgroup', '')
+ self.noownershipgroup = self.module.params.get('noownershipgroup', False)
+ self.vdiskprotectionenabled = self.module.params.get('vdiskprotectionenabled', None)
+ self.etfcmoverallocationmax = self.module.params.get('etfcmoverallocationmax', '')
+ self.old_name = self.module.params.get('old_name', '')
+
+ self.parentmdiskgrp = self.module.params.get('parentmdiskgrp', None)
+ self.size = self.module.params.get('size', None)
+ self.unit = self.module.params.get('unit', None)
+
+ # internal variable
+ self.changed = False
+
+ # Dynamic variable
+ self.partnership_index = None
+
+ self.basic_checks()
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def basic_checks(self):
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ if self.state == 'present':
+ message = 'Following parameters are required together: replicationpoollinkuid, replication_partner_clusterid'
+ if self.replication_partner_clusterid:
+ if not self.replicationpoollinkuid:
+ self.module.fail_json(msg=message)
+ else:
+ if self.replicationpoollinkuid:
+ self.module.fail_json(msg=message)
+
+ if self.replicationpoollinkuid and self.resetreplicationpoollinkuid:
+ self.module.fail_json(
+ msg='Mutually exclusive parameters: replicationpoollinkuid, resetreplicationpoollinkuid'
+ )
+
+ elif self.state == 'absent':
+ invalids = ('warning', 'ownershipgroup', 'noownershipgroup', 'vdiskprotectionenabled', 'etfcmoverallocationmax', 'old_name')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='state=absent but the following parameters have been passed: {0}'.format(invalid_exists))
+
+ def create_validation(self):
+ invalids = ('noownershipgroup', 'old_name')
+ invalid_exists = ', '.join((var for var in invalids if getattr(self, var) not in {'', None}))
+
+ if invalid_exists:
+ self.module.fail_json(
+ msg='Following parameters are not supported during creation: {0}'.format(invalid_exists)
+ )
+
+ def mdiskgrp_rename(self, mdiskgrp_data):
+ msg = None
+ old_mdiskgrp_data = self.mdiskgrp_exists(self.old_name)
+ if not old_mdiskgrp_data and not mdiskgrp_data:
+ self.module.fail_json(msg="mdiskgrp [{0}] does not exists.".format(self.old_name))
+ elif old_mdiskgrp_data and mdiskgrp_data:
+ self.module.fail_json(msg="mdiskgrp with name [{0}] already exists.".format(self.name))
+ elif not old_mdiskgrp_data and mdiskgrp_data:
+ msg = "mdiskgrp [{0}] already renamed.".format(self.name)
+ elif old_mdiskgrp_data and not mdiskgrp_data:
+            if self.old_name == self.parentmdiskgrp:
+                self.module.fail_json(msg="old_name must not be the same as parentmdiskgrp when renaming a child mdiskgrp")
+ # when check_mode is enabled
+ if self.module.check_mode:
+ self.changed = True
+ return
+ self.restapi.svc_run_command('chmdiskgrp', {'name': self.name}, [self.old_name])
+ self.changed = True
+ msg = "mdiskgrp [{0}] has been successfully rename to [{1}].".format(self.old_name, self.name)
+ return msg
+
+ def mdiskgrp_exists(self, name):
+ merged_result = {}
+ data = self.restapi.svc_obj_info(
+ cmd='lsmdiskgrp',
+ cmdopts=None,
+ cmdargs=['-gui', name]
+ )
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+
+ return merged_result
+
+ def mdiskgrp_create(self):
+        # ext is optional for mkmdiskgrp, but this module requires it
+        # until all creation options are implemented (see the check below).
+
+ self.create_validation()
+
+ self.log("creating mdisk group '%s'", self.name)
+
+ # Make command
+ cmd = 'mkmdiskgrp'
+ cmdopts = {}
+
+ if not self.ext:
+ self.module.fail_json(msg="You must pass the ext to the module.")
+
+ if self.noquota or self.safeguarded:
+ if not self.parentmdiskgrp:
+ self.module.fail_json(msg='Required parameter missing: parentmdiskgrp')
+
+ self.check_partnership()
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
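+        # When parentmdiskgrp is set, a child pool is created and only the
+        # size/quota-related options below apply; the else branch creates a
+        # top-level pool with the full option set.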
+ if self.parentmdiskgrp:
+ cmdopts['parentmdiskgrp'] = self.parentmdiskgrp
+ if self.size:
+ cmdopts['size'] = self.size
+ if self.unit:
+ cmdopts['unit'] = self.unit
+ if self.safeguarded:
+ cmdopts['safeguarded'] = self.safeguarded
+ if self.noquota:
+ cmdopts['noquota'] = self.noquota
+ else:
+ if self.easytier:
+ cmdopts['easytier'] = self.easytier
+ if self.encrypt:
+ cmdopts['encrypt'] = self.encrypt
+ if self.ext:
+ cmdopts['ext'] = str(self.ext)
+ if self.provisioningpolicy:
+ cmdopts['provisioningpolicy'] = self.provisioningpolicy
+ if self.datareduction:
+ cmdopts['datareduction'] = self.datareduction
+ if self.replicationpoollinkuid:
+ cmdopts['replicationpoollinkuid'] = self.replicationpoollinkuid
+ if self.ownershipgroup:
+ cmdopts['ownershipgroup'] = self.ownershipgroup
+ if self.vdiskprotectionenabled:
+ cmdopts['vdiskprotectionenabled'] = self.vdiskprotectionenabled
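+            # etfcmoverallocationmax accepts 'off' or a percentage; a bare
+            # number such as '120' (illustrative) is normalized to '120%'.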
+ if self.etfcmoverallocationmax:
+ if "%" not in self.etfcmoverallocationmax and self.etfcmoverallocationmax != "off":
+ cmdopts['etfcmoverallocationmax'] = self.etfcmoverallocationmax + "%"
+ else:
+ cmdopts['etfcmoverallocationmax'] = self.etfcmoverallocationmax
+
+ if self.warning:
+ cmdopts['warning'] = str(self.warning) + "%"
+ cmdopts['name'] = self.name
+ self.log("creating mdisk group command %s opts %s", cmd, cmdopts)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs=None)
+ self.log("creating mdisk group result %s", result)
+
+ if self.replication_partner_clusterid:
+ self.set_bit_mask()
+
+ if 'message' in result:
+ self.log("creating mdisk group command result message %s",
+ result['message'])
+ else:
+ self.module.fail_json(
+ msg="Failed to create mdisk group [%s]" % (self.name))
+
+ def check_partnership(self):
+ if self.replication_partner_clusterid:
+ merged_result = {}
+ result = self.restapi.svc_obj_info(
+ cmd='lspartnership',
+ cmdopts=None,
+ cmdargs=['-gui', self.replication_partner_clusterid]
+ )
+
+ if isinstance(result, list):
+ for res in result:
+ merged_result = res
+ else:
+ merged_result = result
+
+ if merged_result:
+ self.partnership_index = merged_result.get('partnership_index')
+ else:
+ self.module.fail_json(
+ msg='Partnership does not exist for the given cluster ({0}).'.format(self.replication_partner_clusterid)
+ )
+
+ def set_bit_mask(self, systemmask=None):
+ cmd = 'chmdiskgrp'
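+        # Build the linked-systems mask: a '1' padded with zeros out to
+        # partnership_index + 1 characters; e.g. (illustrative) index 3
+        # gives '1'.ljust(4, '0') == '1000'. mdiskgrp_probe compares the
+        # same value zero-filled to 64 characters.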
+ bit_mask = '1'.ljust(int(self.partnership_index) + 1, '0') if not systemmask else systemmask
+ cmdopts = {'replicationpoollinkedsystemsmask': bit_mask}
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ def mdiskgrp_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting mdiskgrp '%s'", self.name)
+
+ cmd = 'rmmdiskgrp'
+ cmdopts = None
+ cmdargs = [self.name]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # rmmdiskgrp does not output anything when successful.
+
+ def mdiskgrp_update(self, modify):
+        # Update the mdisk group.
+ self.log("updating mdiskgrp '%s'", self.name)
+
+ systemmask = None
+ cmd = 'chmdiskgrp'
+
+ if 'replicationpoollinkedsystemsmask' in modify:
+ systemmask = modify.pop('replicationpoollinkedsystemsmask')
+
+ if modify:
+ cmdopts = modify
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ if systemmask or 'replicationpoollinkuid' in modify:
+ self.set_bit_mask(systemmask)
+
+ self.changed = True
+
+ # TBD: Implement a more generic way to check for properties to modify.
+ def mdiskgrp_probe(self, data):
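+        # Compare the requested settings against the lsmdiskgrp output and
+        # collect only the options that differ; the resulting dict drives
+        # chmdiskgrp in mdiskgrp_update().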
+ props = {}
+
+ if self.noprovisioningpolicy and data.get('provisioning_policy_name', ''):
+ props['noprovisioningpolicy'] = self.noprovisioningpolicy
+ if self.provisioningpolicy and self.provisioningpolicy != data.get('provisioning_policy_name', ''):
+ props['provisioningpolicy'] = self.provisioningpolicy
+ if self.noownershipgroup and data.get('owner_name', ''):
+ props['noownershipgroup'] = self.noownershipgroup
+ if self.ownershipgroup and self.ownershipgroup != data.get('owner_name', ''):
+ props['ownershipgroup'] = self.ownershipgroup
+ if self.vdiskprotectionenabled and self.vdiskprotectionenabled != data.get('vdisk_protectionenabled', ''):
+ props['vdiskprotectionenabled'] = self.vdiskprotectionenabled
+        if self.warning and str(self.warning) != data.get('warning', ''):
+ props['warning'] = str(self.warning) + "%"
+ if self.replicationpoollinkuid and self.replicationpoollinkuid != data.get('replication_pool_link_uid', ''):
+ props['replicationpoollinkuid'] = self.replicationpoollinkuid
+ if self.resetreplicationpoollinkuid:
+ props['resetreplicationpoollinkuid'] = self.resetreplicationpoollinkuid
+ if self.etfcmoverallocationmax:
+ if "%" not in self.etfcmoverallocationmax and self.etfcmoverallocationmax != "off":
+ self.etfcmoverallocationmax += "%"
+ if self.etfcmoverallocationmax != data.get('easy_tier_fcm_over_allocation_max', ''):
+ props['etfcmoverallocationmax'] = self.etfcmoverallocationmax
+ if self.replication_partner_clusterid:
+ self.check_partnership()
+ bit_mask = '1'.ljust(int(self.partnership_index) + 1, '0')
+ if bit_mask.zfill(64) != data.get('replication_pool_linked_systems_mask', ''):
+ props['replicationpoollinkedsystemsmask'] = bit_mask
+
+ self.log("mdiskgrp_probe props='%s'", props)
+ return props
+
+ def apply(self):
+ changed = False
+ msg = None
+        modify = {}
+
+ mdiskgrp_data = self.mdiskgrp_exists(self.name)
+ if self.state == 'present' and self.old_name:
+ msg = self.mdiskgrp_rename(mdiskgrp_data)
+ elif self.state == 'absent' and self.old_name:
+ self.module.fail_json(msg="Rename functionality is not supported when 'state' is absent.")
+
+ else:
+ if mdiskgrp_data:
+ if self.state == 'absent':
+ self.log("CHANGED: mdisk group exists, "
+ "but requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ # This is where we detect if chmdiskgrp should be called.
+ modify = self.mdiskgrp_probe(mdiskgrp_data)
+ if modify:
+ changed = True
+ else:
+ if self.state == 'present':
+ self.log("CHANGED: mdisk group does not exist, "
+ "but requested state is 'present'")
+ changed = True
+ if changed:
+ if self.state == 'present':
+ if not mdiskgrp_data:
+ self.mdiskgrp_create()
+ self.changed = True
+ msg = "Mdisk group [%s] has been created." % self.name
+ else:
+ # This is where we would modify
+ self.mdiskgrp_update(modify)
+ msg = "Mdisk group [%s] has been modified." % self.name
+
+ elif self.state == 'absent':
+ self.mdiskgrp_delete()
+ self.changed = True
+ msg = "mdiskgrp [%s] has been deleted." % self.name
+
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "Mdisk group [%s] did not exist." % self.name
+ else:
+ msg = "Mdisk group [%s] already exists. No modifications done" % self.name
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode'
+
+ self.module.exit_json(msg=msg, changed=self.changed)
+
+
+def main():
+ v = IBMSVCmdiskgrp()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_flashcopy.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_flashcopy.py
new file mode 100644
index 000000000..072e124b2
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_flashcopy.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2021 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_start_stop_flashcopy
+short_description: This module starts or stops FlashCopy mapping and consistency groups on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage 'startfcmap', 'stopfcmap', 'startfcconsistgrp', and 'stopfcconsistgrp' commands.
+version_added: "1.4.0"
+options:
+ name:
+ description:
+ - Specifies the name of the FlashCopy mapping or FlashCopy consistency group.
+ required: true
+ type: str
+ state:
+ description:
+ - Starts (C(started)) or stops (C(stopped)) a FlashCopy mapping or FlashCopy consistency group.
+ choices: [ started, stopped ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ isgroup:
+ description:
+      - If set to C(true), the associated I(name) parameter is treated as the name of a FlashCopy consistency group.
+      - If set to C(false) or left unspecified, the associated I(name) parameter is treated as the name of a FlashCopy mapping.
+ required: false
+ type: bool
+ force:
+ description:
+ - Specifies that all processing associated with the FlashCopy mapping or FlashCopy consistency group be immediately stopped.
+ - Valid when I(state=stopped), to stop a FlashCopy mapping or FlashCopy consistency group.
+ required: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Sreshtant Bohidar(@Sreshtant-Bohidar)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Start a FlashCopy mapping
+ ibm.storage_virtualize.ibm_svc_start_stop_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: mapping-name
+ state: started
+- name: Stop a FlashCopy mapping
+ ibm.storage_virtualize.ibm_svc_start_stop_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: mapping-name
+ state: stopped
+- name: Start a FlashCopy consistency group
+ ibm.storage_virtualize.ibm_svc_start_stop_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: fcconsistgrp-name
+ isgroup: true
+ state: started
+- name: Stop a FlashCopy consistency group
+ ibm.storage_virtualize.ibm_svc_start_stop_flashcopy:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ name: fcconsistgrp-name
+ isgroup: true
+ state: stopped
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCFlashcopyStartStop(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['started', 'stopped']),
+ isgroup=dict(type='bool', required=False),
+ force=dict(type='bool', required=False),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.isgroup = self.module.params.get('isgroup', False)
+ self.force = self.module.params.get('force', False)
+
+ # Handling missing mandatory parameters
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_fcmapping(self):
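+        # lsfcconsistgrp/lsfcmap may return a list of dicts for one object;
+        # merge them into a single dict so apply() can read start_time and
+        # status directly.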
+ merged_result = {}
+ data = {}
+ if self.isgroup:
+ data = self.restapi.svc_obj_info(cmd='lsfcconsistgrp', cmdopts=None, cmdargs=[self.name])
+ else:
+ data = self.restapi.svc_obj_info(cmd='lsfcmap', cmdopts=None, cmdargs=[self.name])
+ if isinstance(data, list):
+ for d in data:
+ merged_result.update(d)
+ else:
+ merged_result = data
+ return merged_result
+
+ def start_fc(self):
+ cmd = ''
+ if self.isgroup:
+ cmd = 'startfcconsistgrp'
+ else:
+ cmd = 'startfcmap'
+ cmdopts = {}
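+        # The prep option prepares the mapping or consistency group before
+        # it is started.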
+ cmdopts['prep'] = True
+ if self.force:
+ cmdopts["force"] = self.force
+ self.log("Starting fc mapping.. Command %s opts %s", cmd, cmdopts)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ def stop_fc(self):
+ cmd = ''
+ if self.isgroup:
+ cmd = 'stopfcconsistgrp'
+ else:
+ cmd = 'stopfcmap'
+ cmdopts = {}
+ if self.force:
+ cmdopts["force"] = self.force
+ self.log("Stopping fc mapping.. Command %s opts %s", cmd, cmdopts)
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs=[self.name])
+
+ def apply(self):
+ changed = False
+ msg = None
+ fcdata = self.get_existing_fcmapping()
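+        # lsfcmap/lsfcconsistgrp report an empty start_time until a mapping
+        # or group is started; the checks below use it to decide whether a
+        # start or stop is actually needed.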
+ if fcdata:
+ if self.state == "started" and fcdata["start_time"] == "":
+ self.log("[%s] exists, but requested state is 'started'", self.name)
+ changed = True
+ elif self.state == "stopped" and fcdata["start_time"] != "":
+ self.log("[%s] exists, but requested state is 'stopped'", self.name)
+ changed = True
+ if changed:
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+ else:
+ if self.state == "started":
+ self.start_fc()
+ msg = "fc [%s] has been started" % self.name
+ elif self.state == "stopped":
+ self.stop_fc()
+ msg = "fc [%s] has been stopped" % self.name
+ else:
+ if fcdata:
+ if self.state == "started" or self.state == "stopped":
+ self.log("[%s] exists, but currently in [%s] state", self.name, fcdata["status"])
+ if self.isgroup:
+ msg = "FlashCopy Consistency Group [%s] is in [%s] state." % (self.name, fcdata["status"])
+ else:
+ msg = "FlashCopy Mapping [%s] is in [%s] state." % (self.name, fcdata["status"])
+ else:
+ if self.state == "started" or self.state == "stopped":
+ if self.isgroup:
+ msg = "FlashCopy Consistency Group [%s] does not exist." % self.name
+ else:
+ msg = "FlashCopy Mapping [%s] does not exist." % self.name
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCFlashcopyStartStop()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_replication.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_replication.py
new file mode 100644
index 000000000..63e941012
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_start_stop_replication.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Rohit Kumar <rohit.kumar6@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_start_stop_replication
+short_description: This module starts or stops remote copies on IBM Storage Virtualize family systems
+version_added: "1.3.0"
+
+description:
+ - Ansible interface to manage remote copy related commands.
+
+options:
+ name:
+ description:
+      - Specifies the name of the remote copy relationship or consistency group to be started or stopped.
+ type: str
+ state:
+ description:
+ - Starts (C(started)) or stops (C(stopped)) a remote copy relationship.
+ choices: [started, stopped]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ primary:
+ description:
+ - Specifies the copy direction by defining which disk becomes the primary (source).
+ - Applies when I(state=started).
+ type: str
+ choices: [ 'master', 'aux' ]
+ isgroup:
+ description:
+ - Specifies that a consistency group has to be started or stopped.
+ default: false
+ type: bool
+ clean:
+ description:
+ - Specifies that the volume that is to become a secondary is clean.
+ - Applies when I(state=started).
+ default: false
+ type: bool
+ access:
+ description:
+ - Instructs the system to allow write access to a consistent secondary volume.
+ - Applies when I(state=stopped).
+ default: false
+ type: bool
+ force:
+ description:
+ - Specifies that the system must process the copy operation even if it causes a temporary loss of consistency during synchronization.
+ - Applies when I(state=started).
+ type: bool
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+author:
+ - rohit(@rohitk-github)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Start remote copy
+ ibm.storage_virtualize.ibm_svc_start_stop_replication:
+ name: sample_rcopy
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: started
+ clean: true
+- name: Stop remote copy
+ ibm.storage_virtualize.ibm_svc_start_stop_replication:
+ name: sample_rcopy
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+ state: stopped
+'''
+
+RETURN = '''#'''
+
+
+from ansible.module_utils._text import to_native
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils.basic import AnsibleModule
+from traceback import format_exc
+
+
+class IBMSVCStartStopReplication(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(type='str'),
+ state=dict(type='str',
+ required=True,
+ choices=['started', 'stopped']),
+ force=dict(type='bool', required=False),
+ primary=dict(type='str', choices=['master', 'aux']),
+ clean=dict(type='bool', default=False),
+ access=dict(type='bool', default=False),
+ isgroup=dict(type='bool', default=False),
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.primary = self.module.params.get('primary', None)
+ self.clean = self.module.params.get('clean', False)
+ self.access = self.module.params.get('access', False)
+ self.force = self.module.params.get('force', False)
+ self.isgroup = self.module.params.get('isgroup', False)
+
+ # Handling missing mandatory parameter name
+ if not self.name:
+ self.module.fail_json(msg='Missing mandatory parameter: name')
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def start(self):
+ """
+ Starts the Metro Mirror or Global Mirror relationship copy process, set
+ the direction of copy if undefined, and (optionally) mark the secondary
+ volume of the relationship as clean. The relationship must be a
+ stand-alone relationship.
+ """
+ cmdopts = {}
+ self.log("self.primary is %s", self.primary)
+ if self.primary:
+ cmdopts['primary'] = self.primary
+ if self.clean:
+ cmdopts['clean'] = self.clean
+ if self.force:
+ cmdopts['force'] = self.force
+ if self.isgroup:
+ result = self.restapi.svc_run_command(cmd='startrcconsistgrp',
+ cmdopts=cmdopts,
+ cmdargs=[self.name])
+ if result == '':
+ self.changed = True
+ self.log("succeeded to start the remote copy group %s", self.name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("start the remote copy group %s with result message %s", self.name, result['message'])
+ else:
+ msg = "Failed to start the remote copy group [%s]" % self.name
+ self.module.fail_json(msg=msg)
+ else:
+ result = self.restapi.svc_run_command(cmd='startrcrelationship',
+ cmdopts=cmdopts,
+ cmdargs=[self.name])
+ self.log("start the rcrelationship %s with result %s", self.name, result)
+ if result == '':
+ self.changed = True
+ self.log("succeeded to start the remote copy %s", self.name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("start the rcrelationship %s with result message %s", self.name, result['message'])
+ else:
+ msg = "Failed to start the rcrelationship [%s]" % self.name
+ self.module.fail_json(msg=msg)
+
+ def stop(self):
+ """
+ Stops the copy process for a Metro Mirror or Global Mirror stand-alone
+ relationship.
+ """
+ cmdopts = {}
+ if self.access:
+ cmdopts['access'] = self.access
+ if self.isgroup:
+ result = self.restapi.svc_run_command(cmd='stoprcconsistgrp',
+ cmdopts=cmdopts,
+ cmdargs=[self.name])
+ self.log("stop the remote copy group %s with result %s", self.name, result)
+ if result == '':
+ self.changed = True
+ self.log("succeeded to stop the remote copy group %s", self.name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("stop the remote copy group %s with result message %s", self.name, result['message'])
+ else:
+ msg = "Failed to stop the rcrelationship [%s]" % self.name
+ self.module.fail_json(msg=msg)
+ else:
+ result = self.restapi.svc_run_command(cmd='stoprcrelationship', cmdopts=cmdopts, cmdargs=[self.name])
+ self.log("stop the rcrelationship %s with result %s", self.name, result)
+ if result == '':
+ self.changed = True
+ self.log("succeeded to stop the remote copy %s", self.name)
+ elif 'message' in result:
+ self.changed = True
+ self.log("stop the rcrelationship %s with result message %s", self.name, result['message'])
+ else:
+ msg = "Failed to stop the rcrelationship [%s]" % self.name
+ self.module.fail_json(msg=msg)
+
+ def apply(self):
+ msg = None
+ self.log("self state is %s", self.state)
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode.'
+ else:
+ if self.state == 'started':
+ self.start()
+ if not self.isgroup:
+ msg = "remote copy [%s] has been started." % self.name
+ else:
+ msg = "remote copy group [%s] has been started." % self.name
+ elif self.state == 'stopped':
+ self.stop()
+ if not self.isgroup:
+ msg = "remote copy [%s] has been stopped." % self.name
+ else:
+ msg = "remote copy group [%s] has been stopped." % self.name
+ else:
+ msg = "Invalid %s state. Supported states are 'started' and 'stopped'" % self.state
+
+ self.module.exit_json(msg=msg, changed=True)
+
+
+def main():
+ v = IBMSVCStartStopReplication()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_vol_map.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_vol_map.py
new file mode 100644
index 000000000..33b70353d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svc_vol_map.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+# Shilpi Jain <shilpi.jain1@ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svc_vol_map
+short_description: This module manages volume mapping on IBM Storage Virtualize family systems
+description:
+ - Ansible interface to manage volume mapping commands
+ 'mkvdiskhostmap', 'rmvdiskhostmap', 'mkvolumehostclustermap', and 'rmvolumehostclustermap'.
+version_added: "1.0.0"
+options:
+ volname:
+ description:
+ - Specifies the volume name for host or hostcluster mapping.
+ required: true
+ type: str
+ host:
+ description:
+ - Specifies the host name for host mapping.
+ - This parameter is required to create or delete a volume-to-host mapping.
+ type: str
+ hostcluster:
+ description:
+ - Specifies the name of the host cluster for host mapping.
+ - This parameter is required to create or delete a volume-to-hostcluster mapping.
+ type: str
+ scsi:
+ description:
+ - Specifies the SCSI logical unit number (LUN) ID to assign to a volume on the specified host or host cluster.
+ - Applies when I(state=present).
+ type: int
+ state:
+ description:
+ - Creates (C(present)) or removes (C(absent)) a volume mapping.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ domain:
+ description:
+ - Domain for the Storage Virtualize system.
+ - Valid when hostname is used for the parameter I(clustername).
+ type: str
+ username:
+ description:
+ - REST API username for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ password:
+ description:
+ - REST API password for the Storage Virtualize system.
+ - The parameters I(username) and I(password) are required if not using I(token) to authenticate a user.
+ type: str
+ token:
+ description:
+ - The authentication token to verify a user on the Storage Virtualize system.
+ - To generate a token, use the ibm_svc_auth module.
+ type: str
+ version_added: '1.5.0'
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+ validate_certs:
+ description:
+ - Validates certification.
+ default: false
+ type: bool
+author:
+ - Peng Wang(@wangpww)
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Map a volume to a host
+ ibm.storage_virtualize.ibm_svc_vol_map:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ volname: volume0
+ host: host4test
+ scsi: 1
+ state: present
+- name: Unmap a volume from a host
+ ibm.storage_virtualize.ibm_svc_vol_map:
+ clustername: "{{clustername}}"
+ domain: "{{domain}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/playbook.debug
+ volname: volume0
+ host: host4test
+ state: absent
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi, svc_argument_spec, get_logger
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCvdiskhostmap(object):
+ def __init__(self):
+ argument_spec = svc_argument_spec()
+
+ argument_spec.update(
+ dict(
+ volname=dict(type='str', required=True),
+ host=dict(type='str', required=False),
+ state=dict(type='str', required=True, choices=['absent',
+ 'present']),
+ scsi=dict(type='int', required=False),
+ hostcluster=dict(type='str', required=False)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required
+ self.volname = self.module.params['volname']
+ self.state = self.module.params['state']
+
+ # Optional
+ self.host = self.module.params['host']
+ self.hostcluster = self.module.params['hostcluster']
+ self.scsi = self.module.params['scsi']
+
+        # Handling for mandatory parameter volname
+ if not self.volname:
+ self.module.fail_json(msg="Missing mandatory parameter: volname")
+
+ self.restapi = IBMSVCRestApi(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ domain=self.module.params['domain'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ validate_certs=self.module.params['validate_certs'],
+ log_path=log_path,
+ token=self.module.params['token']
+ )
+
+ def get_existing_vdiskhostmap(self):
+ merged_result = []
+
+ data = self.restapi.svc_obj_info(cmd='lsvdiskhostmap', cmdopts=None,
+ cmdargs=[self.volname])
+
+ if isinstance(data, list):
+ for d in data:
+ merged_result.append(d)
+ elif data:
+ merged_result = [data]
+
+ return merged_result
+
+ # TBD: Implement a more generic way to check for properties to modify.
+ def vdiskhostmap_probe(self, mdata):
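+        # Scan the existing mappings for this volume: changing the SCSI ID
+        # of an existing mapping is rejected, and 'map' is recorded when no
+        # mapping exists yet for the given host or host cluster.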
+ props = []
+ self.log("vdiskhostmap_probe props='%s'", mdata)
+ mapping_exist = False
+ for data in mdata:
+ if self.host:
+ if (self.host == data['host_name']) and (self.volname == data['name']):
+ if self.scsi and (self.scsi != int(data['SCSI_id'])):
+ self.module.fail_json(msg="Update not supported for parameter: scsi")
+ mapping_exist = True
+ elif self.hostcluster:
+ if (self.hostcluster == data['host_cluster_name']) and (self.volname == data['name']):
+ if self.scsi and (self.scsi != int(data['SCSI_id'])):
+ self.module.fail_json(msg="Update not supported for parameter: scsi")
+ mapping_exist = True
+
+ if not mapping_exist:
+ props += ["map"]
+
+        if not props:
+            props = None
+
+ self.log("vdiskhostmap_probe props='%s'", props)
+ return props
+
+ def vdiskhostmap_create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("creating vdiskhostmap '%s' '%s'", self.volname, self.host)
+
+ # Make command
+ cmd = 'mkvdiskhostmap'
+ cmdopts = {'force': True}
+ cmdopts['host'] = self.host
+ cmdopts['scsi'] = self.scsi
+ cmdargs = [self.volname]
+
+ self.log("creating vdiskhostmap command %s opts %s args %s",
+ cmd, cmdopts, cmdargs)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log("create vdiskhostmap result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create vdiskhostmap result message %s",
+ result['message'])
+ else:
+ self.module.fail_json(msg="Failed to create vdiskhostmap.")
+
+ def vdiskhostmap_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting vdiskhostmap '%s'", self.volname)
+
+ cmd = 'rmvdiskhostmap'
+ cmdopts = {}
+ cmdopts['host'] = self.host
+ cmdargs = [self.volname]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # rmvdiskhostmap does not output anything when successful.
+ self.changed = True
+
+ def vdiskhostclustermap_create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("creating mkvolumehostclustermap '%s' '%s'", self.volname, self.hostcluster)
+
+ # Make command
+ cmd = 'mkvolumehostclustermap'
+ cmdopts = {'force': True}
+ cmdopts['hostcluster'] = self.hostcluster
+ cmdopts['scsi'] = self.scsi
+ cmdargs = [self.volname]
+
+ self.log("creating vdiskhostmap command %s opts %s args %s",
+ cmd, cmdopts, cmdargs)
+
+ # Run command
+ result = self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+ self.log("create vdiskhostmap result %s", result)
+
+ if 'message' in result:
+ self.changed = True
+ self.log("create vdiskhostmap result message %s",
+ result['message'])
+ else:
+ self.module.fail_json(msg="Failed to create vdiskhostmap.")
+
+ def vdiskhostclustermap_delete(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ self.log("deleting vdiskhostclustermap '%s'", self.volname)
+
+ cmd = 'rmvolumehostclustermap'
+ cmdopts = {}
+ cmdopts['hostcluster'] = self.hostcluster
+ cmdargs = [self.volname]
+
+ self.restapi.svc_run_command(cmd, cmdopts, cmdargs)
+
+ # Any error will have been raised in svc_run_command
+        # rmvolumehostclustermap does not output anything when successful.
+ self.changed = True
+
+ def apply(self):
+ changed = False
+ msg = None
+
+ # Handling for volume
+ if not self.volname:
+ self.module.fail_json(msg="You must pass in "
+ "volname to the module.")
+
+ # Handling for host and hostcluster
+ if (self.host and self.hostcluster):
+ self.module.fail_json(msg="Either use host or hostcluster")
+ elif (not self.host and not self.hostcluster):
+ self.module.fail_json(msg="Missing parameter: host or hostcluster")
+
+ vdiskmap_data = self.get_existing_vdiskhostmap()
+ self.log("volume mapping data is : '%s'", vdiskmap_data)
+
+ if vdiskmap_data:
+ if self.state == 'absent':
+ self.log("vdiskmap exists, "
+ "and requested state is 'absent'")
+ changed = True
+ elif self.state == 'present':
+ probe_data = self.vdiskhostmap_probe(vdiskmap_data)
+ if probe_data:
+ self.log("vdiskmap does not exist, but requested state is 'present'")
+ changed = True
+ else:
+ if self.state == 'present':
+ self.log("vdiskmap does not exist, "
+ "but requested state is 'present'")
+ changed = True
+
+ if changed:
+ if self.state == 'present':
+ if self.host:
+ self.vdiskhostmap_create()
+ msg = "Vdiskhostmap %s %s has been created." % (
+ self.volname, self.host)
+ elif self.hostcluster:
+ self.vdiskhostclustermap_create()
+ msg = "Vdiskhostclustermap %s %s has been created." % (
+ self.volname, self.hostcluster)
+ elif self.state == 'absent':
+ if self.host:
+ self.vdiskhostmap_delete()
+ msg = "vdiskhostmap [%s] has been deleted." % self.volname
+ elif self.hostcluster:
+ self.vdiskhostclustermap_delete()
+ msg = "vdiskhostclustermap [%s] has been deleted." % self.volname
+
+ if self.module.check_mode:
+ msg = 'skipping changes due to check mode'
+ else:
+ self.log("exiting with no changes")
+ if self.state == 'absent':
+ msg = "Volume mapping [%s] did not exist." % self.volname
+ else:
+ msg = "Volume mapping [%s] already exists." % self.volname
+
+ self.module.exit_json(msg=msg, changed=changed)
+
+
+def main():
+ v = IBMSVCvdiskhostmap()
+ try:
+ v.apply()
+ except Exception as e:
+ v.log("Exception in apply(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svcinfo_command.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svcinfo_command.py
new file mode 100644
index 000000000..036ac385d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svcinfo_command.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svcinfo_command
+short_description: This module implements an SSH client which helps to run
+    a svcinfo CLI command on IBM Storage Virtualize family systems
+version_added: "1.2.0"
+description:
+- Runs a single svcinfo CLI command on IBM Storage Virtualize family systems.
+  Filter options such as -filtervalue, or pipes ('|') with grep, awk, and
+  other tools, are not supported by this module.
+  Paramiko must be installed to use this module.
+author:
+ - Shilpi Jain (@Shilpi-Jain1)
+options:
+ command:
+ description:
+      - Single svcinfo CLI command to be executed on the Storage Virtualize system.
+        The command must start with the svcinfo keyword.
+ type: str
+ usesshkey:
+ description:
+      - For a key-pair based SSH connection, set this field to C('yes').
+        Provide the full path of the key file in the I(key_filename) field.
+        If not provided, the default SSH key path is used.
+ type: str
+ choices: [ 'yes', 'no']
+ default: 'no'
+ key_filename:
+ description:
+ - SSH client private key filename. By default, C(~/.ssh/id_rsa) is used.
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the
+ Storage Virtualize system.
+ type: str
+ required: true
+ username:
+ description:
+ - Username for the Storage Virtualize system.
+ required: true
+ type: str
+ password:
+ description:
+ - Password for the Storage Virtualize system.
+ required: true
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Run svcinfo CLI command using SSH client with password
+ ibm.storage_virtualize.ibm_svcinfo_command:
+ command: "svcinfo lsuser {{user}}"
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+- name: Run svcinfo CLI command using passwordless SSH Client
+ ibm.storage_virtualize.ibm_svcinfo_command:
+ command: "svcinfo lsuser"
+ usesshkey: "yes"
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password:
+ log_path: /tmp/ansible.log
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import svc_ssh_argument_spec, get_logger
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCsshClient(object):
+ def __init__(
+ self,
+ timeout=30,
+ cmd_timeout=30.0):
+ """
+ Constructor for SSH client class.
+ """
+
+ argument_spec = svc_ssh_argument_spec()
+
+ argument_spec.update(
+ dict(
+ command=dict(type='str', required=False),
+ usesshkey=dict(type='str', required=False, default='no', choices=['yes', 'no']),
+ key_filename=dict(type='str', required=False)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required parameters for module
+ self.command = self.module.params['command']
+
+ # local SSH keys will be used in case of password less SSH connection
+ self.usesshkey = self.module.params['usesshkey']
+ self.key_filename = self.module.params['key_filename']
+
+ # Required
+ self.clustername = self.module.params['clustername']
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.log_path = log_path
+
+ # Handling missing mandatory parameter command
+ if not self.command:
+ self.module.fail_json(msg='Missing mandatory parameter: command')
+
+ if self.password is None:
+ if self.usesshkey == 'yes':
+ self.log("password is none and use ssh private key. Check for its path")
+ if self.key_filename:
+ self.look_for_keys = True
+ else:
+ self.log("key file_name is not provided, use default one, ~/.ssh/id_rsa.pub")
+ self.look_for_keys = True
+ else:
+ self.module.fail_json(msg="You must pass in either password or key for ssh")
+ else:
+ self.look_for_keys = False
+
+ # Connect to the storage
+ self.ssh_client = IBMSVCssh(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ look_for_keys=self.look_for_keys,
+ key_filename=self.key_filename,
+ log_path=log_path
+ )
+
+ def modify_command(self, argument):
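+        # Insert '-json' right after the ls* subcommand so the CLI returns
+        # JSON; e.g. (illustrative) 'svcinfo lsuser superuser' becomes
+        # 'svcinfo lsuser -json superuser'.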
+ index = None
+ command = [item.strip() for item in argument.split()]
+ if command:
+ for n, word in enumerate(command):
+ if word.startswith('ls') and 'svcinfo' in command[n - 1]:
+ index = n
+ break
+ if index:
+ command.insert(index + 1, '-json')
+ return ' '.join(command)
+
+ def send_svcinfo_command(self):
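+        # Validate before executing: the command must be a plain svcinfo
+        # invocation; pipes and -filtervalue are rejected, as noted in the
+        # module documentation.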
+ info_output = ""
+ message = ""
+ failed = False
+
+ if self.ssh_client.is_client_connected:
+ if not self.command.startswith('svcinfo'):
+ failed = True
+ message = "The command must start with svcinfo"
+            if self.command.find('|') != -1:
+                failed = True
+                message = "Pipe (|) is not supported in the command."
+            if self.command.find('-filtervalue') != -1:
+                failed = True
+                message = "'-filtervalue' is not supported in the command."
+ if not failed:
+ new_command = self.modify_command(self.command)
+ self.log("Executing CLI command: %s", new_command)
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(new_command)
+ for line in stdout.readlines():
+ info_output += line
+ self.log(info_output)
+ rc = stdout.channel.recv_exit_status()
+ if rc > 0:
+ message = stderr.read()
+ if len(message) > 0:
+ message = message.decode('utf-8')
+ self.log("Error in executing CLI command: %s", new_command)
+ self.log("%s", message)
+ else:
+ message = "Unknown error"
+ self.ssh_client._svc_disconnect()
+ self.module.fail_json(msg=message, rc=rc, stdout=info_output)
+ self.ssh_client._svc_disconnect()
+ self.module.exit_json(msg=message, rc=rc, stdout=info_output, changed=False)
+ else:
+ message = "SSH client is not connected"
+ self.ssh_client._svc_disconnect()
+ self.module.fail_json(msg=message)
+
+
+def main():
+ v = IBMSVCsshClient()
+ try:
+ if not v.ssh_client.is_client_connected:
+ v.log("SSH Connection failed, retry")
+ v.module.exit_json(msg="SSH connection failed, retry", changed=False)
+ else:
+ v.send_svcinfo_command()
+ except Exception as e:
+ v.ssh_client._svc_disconnect()
+ v.log("Exception in executing CLI command(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svctask_command.py b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svctask_command.py
new file mode 100644
index 000000000..3df3d9823
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/plugins/modules/ibm_svctask_command.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_svctask_command
+short_description: This module implements an SSH client which helps to run
+    svctask CLI command(s) on IBM Storage Virtualize family systems
+version_added: "1.2.0"
+description:
+- Runs svctask CLI command(s) on IBM Storage Virtualize family systems.
+  If any svctask command fails, the module stops processing the remaining
+  commands in the list.
+  Paramiko must be installed to use this module.
+author:
+ - Shilpi Jain (@Shilpi-Jain1)
+options:
+ command:
+ description:
+      - A list containing svctask CLI commands to be executed on the storage system.
+ - Each command must start with 'svctask' keyword.
+ type: list
+ elements: str
+ usesshkey:
+ description:
+ - For key-pair based SSH connection, set this field as "yes".
+ Provide full path of key in key_filename field.
+ If not provided, default path of SSH key is used.
+ type: str
+ choices: [ 'yes', 'no']
+ default: 'no'
+ key_filename:
+ description:
+ - SSH client private key filename. By default, ~/.ssh/id_rsa is used.
+ type: str
+ clustername:
+ description:
+ - The hostname or management IP of the Storage Virtualize system.
+ type: str
+ required: true
+ username:
+ description:
+ - Username for the Storage Virtualize system.
+ required: true
+ type: str
+ password:
+ description:
+ - Password for the Storage Virtualize system.
+ required: true
+ type: str
+ log_path:
+ description:
+ - Path of debug log file.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Run svctask CLI commands using SSH client with password
+ ibm.storage_virtualize.ibm_svctask_command:
+ command: [
+ "svctask mkvdisk -name {{ volname }} -mdiskgrp '{{ pool }}' -easytier '{{ easy_tier }}' -size {{ size }} -unit {{ unit }}",
+ "svctask rmvdisk {{ volname }}"
+ ]
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ log_path: /tmp/ansible.log
+- name: Run svctask CLI command using passwordless SSH Client
+ ibm.storage_virtualize.ibm_svctask_command:
+ command: [
+ "svctask mkvdisk -name vol0 -mdiskgrp pool0 -easytier off -size 1 -unit gb",
+ "svctask rmvdisk vol0"
+ ]
+ clustername: "{{clustername}}"
+ username: "{{username}}"
+ password:
+ usesshkey: yes
+ log_path: /tmp/ansible.log
+'''
+
+RETURN = '''#'''
+
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import svc_ssh_argument_spec, get_logger
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
+from ansible.module_utils._text import to_native
+
+
+class IBMSVCsshClient(object):
+ def __init__(
+ self,
+ timeout=30,
+ cmd_timeout=30.0):
+ """
+ Constructor for SSH client class.
+ """
+
+ argument_spec = svc_ssh_argument_spec()
+
+ argument_spec.update(
+ dict(
+ command=dict(type='list', elements='str', required=False),
+ usesshkey=dict(type='str', required=False, default='no', choices=['yes', 'no']),
+ key_filename=dict(type='str', required=False)
+ )
+ )
+
+ self.module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # logging setup
+ log_path = self.module.params['log_path']
+ log = get_logger(self.__class__.__name__, log_path)
+ self.log = log.info
+
+ # Required fields for module
+ self.command = self.module.params['command']
+
+ # local SSH keys will be used in case of password less SSH connection
+ self.usesshkey = self.module.params['usesshkey']
+ self.key_filename = self.module.params['key_filename']
+
+ # Required
+ self.clustername = self.module.params['clustername']
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.log_path = log_path
+
+ # Handling missing mandatory parameter
+ if not self.command:
+ self.module.fail_json(msg='Missing mandatory parameter: command')
+
+ if self.password is None:
+ if self.usesshkey == 'yes':
+ self.log("password is none and use ssh private key. Check for its path")
+ if self.key_filename:
+ self.log("key file_name is provided, use it")
+ self.look_for_keys = True
+ else:
+ self.log("key file_name is not provided, use default one, ~/.ssh/id_rsa.pub")
+ self.look_for_keys = True
+ else:
+ self.log("password is none and SSH key is not provided")
+ self.module.fail_json(msg="You must pass in either password or key for ssh")
+ else:
+ self.log("password is given")
+ self.look_for_keys = False
+
+ self.ssh_client = IBMSVCssh(
+ module=self.module,
+ clustername=self.module.params['clustername'],
+ username=self.module.params['username'],
+ password=self.module.params['password'],
+ look_for_keys=self.look_for_keys,
+ key_filename=self.key_filename,
+ log_path=log_path
+ )
+
+ def send_svctask_command(self):
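+        # Commands run sequentially over the SSH channel; the first command
+        # that exits non-zero aborts the remaining commands, as described in
+        # the module documentation.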
+ message = ""
+ if self.ssh_client.is_client_connected:
+ for cmd in self.command:
+ if not cmd.startswith('svctask'):
+ self.ssh_client._svc_disconnect()
+ self.module.fail_json(msg="The command must start with svctask", changed=False)
+ self.log("Executing CLI command: %s", cmd)
+ stdin, stdout, stderr = self.ssh_client.client.exec_command(cmd)
+ for line in stdout.readlines():
+ message += line
+ self.log(line)
+ rc = stdout.channel.recv_exit_status()
+ if rc > 0:
+ result = stderr.read()
+ if len(result) > 0:
+ result = result.decode('utf-8')
+ self.log("Error in executing CLI command: %s", cmd)
+ self.log("%s", result)
+ message += result
+ else:
+ message = "Unknown error"
+ self.ssh_client._svc_disconnect()
+ self.module.fail_json(msg=message, rc=rc)
+ else:
+ message = "SSH client is not connected"
+ self.ssh_client._svc_disconnect()
+ self.module.exit_json(msg=message, rc=rc, changed=True)
+
+
+def main():
+ v = IBMSVCsshClient()
+ try:
+ if not v.ssh_client.is_client_connected:
+ v.log("SSH Connection failed, retry")
+ v.module.exit_json(msg="SSH Connection failed, retry", changed=False)
+ else:
+ v.send_svctask_command()
+ except Exception as e:
+ v.ssh_client._svc_disconnect()
+ v.log("Exception in running command(): \n%s", format_exc())
+ v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ibm/storage_virtualize/requirements.txt b/ansible_collections/ibm/storage_virtualize/requirements.txt
new file mode 100644
index 000000000..6298163bc
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/requirements.txt
@@ -0,0 +1,3 @@
+cryptography >= '42.0.5'
+paramiko >= '3.4.0'
+python_version >= '3.9'
diff --git a/ansible_collections/ibm/storage_virtualize/requirements.yml b/ansible_collections/ibm/storage_virtualize/requirements.yml
new file mode 100644
index 000000000..8b8dce2bd
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/requirements.yml
@@ -0,0 +1,4 @@
+# requirements.yml
+---
+collections:
+ - ibm.storage_virtualize
diff --git a/ansible_collections/ibm/storage_virtualize/tests/config.yml b/ansible_collections/ibm/storage_virtualize/tests/config.yml
new file mode 100644
index 000000000..05012468b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/config.yml
@@ -0,0 +1,2 @@
+modules:
+ python_requires: '>=3.9'
diff --git a/ansible_collections/ibm/storage_virtualize/tests/integration/targets/.gitkeep b/ansible_collections/ibm/storage_virtualize/tests/integration/targets/.gitkeep
new file mode 100644
index 000000000..5c4b10978
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/integration/targets/.gitkeep
@@ -0,0 +1,3 @@
+This is a placeholder file that only exists to keep the current
+directory in Git. It is safe to remove it once this directory contains
+the actual test files.
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/.gitkeep b/ansible_collections/ibm/storage_virtualize/tests/unit/.gitkeep
new file mode 100644
index 000000000..5c4b10978
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/.gitkeep
@@ -0,0 +1,3 @@
+This is a placeholder file that only exists to keep the current
+directory in Git. It is safe to remove it once this directory contains
+the actual test files.
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_ssh.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_ssh.py
new file mode 100644
index 000000000..a7d0abd31
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_ssh.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2024 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+# Sandip G. Rajbanshi <sandip.rajbanshi@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module_utils: ibm_svc_ssh """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import paramiko
+from mock import patch
+from ansible.module_utils import basic
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_ssh import IBMSVCssh
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVModuleUtilsSsh(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def setUp(self, mock_connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.sshclient = IBMSVCssh(self.mock_module_helper, '1.2.3.4',
+ 'username', 'password',
+ False, '', 'test.log')
+
+ def set_default_args(self):
+ return dict({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'look_for_keys': False,
+ })
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_svc_ssh_connect(self, mock_connect):
+ if paramiko is None:
+ print("paramiko is not installed")
+
+ ret = self.sshclient.is_connected()
+ self.assertTrue(ret)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_svc_ssh_disconnect_successfully(self, mock_disconnect):
+ if paramiko is None:
+ print("paramiko is not installed")
+
+ patch.object(paramiko.SSHClient, 'close')
+ ret = self.sshclient._svc_disconnect()
+ self.assertTrue(ret)
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ def test_register_plugin(self, ssh_mock):
+ if paramiko is None:
+ print("paramiko is not installed")
+
+ self.sshclient = IBMSVCssh(self.mock_module_helper, '1.2.3.4',
+ 'username', 'password',
+ False, '', 'test.log')
+ ssh_mock.exec_command.return_value = None
+ result = self.sshclient.register_plugin()
+ self.assertTrue(result)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_utils.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_utils.py
new file mode 100644
index 000000000..b6cbc853a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/module_utils/test_ibm_svc_utils.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2024 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sandip G. Rajbanshi <sandip.rajbanshi@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module_utils: ibm_svc_utils """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVModuleUtils(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ })
+
+ def test_return_port_and_protocol(self):
+ self.assertEqual(self.restapi.port, '7443')
+ self.assertEqual(self.restapi.protocol, 'https')
+
+ def test_return_resturl(self):
+ resturl = 'https://1.2.3.4.domain.ibm.com:7443/rest'
+ self.assertEqual(self.restapi.resturl, resturl)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible.module_utils.basic.AnsibleModule')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ def test_svc_rest_with_module(self, mock_svc_token_wrap, mock_module,
+ mock_svc_authorize):
+ PARAMS_FOR_PRESENT = {
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password'
+ }
+ mock_module.params = PARAMS_FOR_PRESENT
+ mock_module.return_value = mock_module
+ mock_svc_token_wrap.return_value = {'err': 'err', 'out': []}
+ self.restapi = IBMSVCRestApi(mock_module, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ self.restapi.svc_run_command('lshost', {}, [])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ def test_svc_run_command_successfully(self, mock_svc_token_wrap):
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "nvme", "owner_id": "",
+ "owner_name": ""}]
+ mock_svc_token_wrap.return_value = {'err': '', 'out': host_ret}
+ ret = self.restapi.svc_run_command('lshost', {}, [])
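+        # The trailing 10 is svc_run_command's default timeout, forwarded to _svc_token_wrap.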
+ mock_svc_token_wrap.assert_called_with('lshost', {}, [], 10)
+ self.assertDictEqual(ret[0], host_ret[0])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ def test_svc_obj_info_return_none(self, mock_svc_token_wrap):
+ mock_svc_token_wrap.return_value = {'code': 500}
+ self.assertEqual(None, self.restapi.svc_obj_info('lshost', {}, []))
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ def test_svc_obj_info_successfully(self, mock_svc_token_wrap):
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "nvme", "owner_id": "",
+ "owner_name": ""}]
+ mock_svc_token_wrap.return_value = {'out': host_ret, 'code': 1,
+ 'err': ''}
+ ret = self.restapi.svc_obj_info('lshost', {}, [])
+ self.assertDictEqual(ret[0], host_ret[0])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ def test_get_auth_token(self, mock_svc_token_wrap, mock_svc_authorize):
+ test_var = 'a2ca1d31d663ce181b955c07f51a000c2f75835b3d87735d1f334cf4b913880c'
+ mock_svc_authorize.return_value = test_var
+ ret = self.restapi.get_auth_token()
+ self.assertEqual(test_var, ret)
+
+    def test_register_plugin_cmdopts(self):
+        restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+                                'username', 'password',
+                                False, '', 'test.log', True)
+        cmdops = restapi.register_plugin_cmdopts()
+        self.assertEqual(cmdops["name"], "Ansible")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py
new file mode 100644
index 000000000..f52ed4414
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_awss3_cloudaccount.py
@@ -0,0 +1,594 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_awss3_cloudaccount """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_awss3_cloudaccount import IBMSVAWSS3
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVAWSS3(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_missing_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVAWSS3()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mandatory_parameter_validation(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ aws = IBMSVAWSS3()
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_aws_acc_create(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'bucketprefix': 'ansible',
+ 'accesskeyid': 's3access',
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'upbandwidthmbits': 20,
+ 'downbandwidthmbits': 20,
+ 'region': 'us-east',
+ 'encrypt': 'no',
+ 'state': 'present'
+ })
+
+ aws = IBMSVAWSS3()
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_aws_acc_create_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'bucketprefix': 'ansible',
+ 'accesskeyid': 's3access',
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'upbandwidthmbits': 20,
+ 'downbandwidthmbits': 20,
+ 'region': 'us-east',
+ 'encrypt': 'no',
+ 'state': 'present'
+ })
+
+ aws = IBMSVAWSS3()
+ svc_obj_info_mock.return_value = {
+ "id": "0",
+ "name": "awss3acc",
+ "type": "awss3",
+ "status": "online",
+ "mode": "normal",
+ "active_volume_count": "1",
+ "backup_volume_count": "1",
+ "import_system_id": "",
+ "import_system_name": "",
+ "error_sequence_number": "",
+ "refreshing": "no",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "backup_timestamp": "221007111148",
+ "encrypt": "no",
+ "certificate": "yes",
+ "certificate_expiry": "",
+ "endpoint": "",
+ "awss3_bucket_prefix": "ansible",
+ "awss3_access_key_id": "s3access",
+ "awss3_region": "us-east",
+ "swift_keystone": "no",
+ "swift_container_prefix": "",
+ "swift_tenant_name": "",
+ "swift_user_name": ""
+ }
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_awss3_cloudaccount.IBMSVAWSS3.is_aws_account_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_aws_account(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ aws_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3_new',
+ 'old_name': 'awss3acc',
+ 'accesskeyid': 'newaccess',
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'upbandwidthmbits': 10,
+ 'downbandwidthmbits': 10,
+ 'state': 'present'
+ })
+
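+        # Three sequential existence lookups during a rename (order assumed from the
+        # module's apply flow): the old account is found, the new name is free, and
+        # the old account is re-read before the change command is issued.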
+ aws_exists_mock.side_effect = iter([
+ {
+ "id": "0",
+ "name": "awss3acc",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "awss3_access_key_id": "s3access"
+ },
+ {},
+ {
+ "id": "0",
+ "name": "awss3acc",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "awss3_access_key_id": "s3access"
+ }
+ ])
+
+ aws = IBMSVAWSS3()
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ svc_run_command_mock.assert_called_with(
+ 'chcloudaccountawss3',
+ {
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'downbandwidthmbits': '10',
+ 'upbandwidthmbits': '10',
+ 'name': 'awss3_new',
+ 'accesskeyid': 'newaccess',
+ },
+ cmdargs=['awss3acc'],
+ timeout=20
+ )
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_awss3_cloudaccount.IBMSVAWSS3.is_aws_account_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_aws_account_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ aws_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3_new',
+ 'old_name': 'awss3acc',
+ 'accesskeyid': 'newaccess',
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'upbandwidthmbits': 10,
+ 'downbandwidthmbits': 10,
+ 'state': 'present'
+ })
+
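+        # Idempotent rename: the old name no longer exists and the new name is
+        # already present, so no CLI command should be issued.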
+ aws_exists_mock.side_effect = iter([
+ {},
+ {
+ "id": "0",
+ "name": "awss3_new",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "awss3_access_key_id": "s3access"
+ },
+ {}
+ ])
+ aws = IBMSVAWSS3()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_aws_acc_update(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'accesskeyid': 'newaccess',
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'upbandwidthmbits': 10,
+ 'downbandwidthmbits': 10,
+ 'state': 'present'
+ })
+
+ aws = IBMSVAWSS3()
+ svc_obj_info_mock.return_value = {
+ "id": "0",
+ "name": "awss3acc",
+ "type": "awss3",
+ "status": "online",
+ "mode": "normal",
+ "active_volume_count": "1",
+ "backup_volume_count": "1",
+ "import_system_id": "",
+ "import_system_name": "",
+ "error_sequence_number": "",
+ "refreshing": "no",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "backup_timestamp": "221007111148",
+ "encrypt": "no",
+ "certificate": "yes",
+ "certificate_expiry": "",
+ "endpoint": "",
+ "awss3_bucket_prefix": "ansible",
+ "awss3_access_key_id": "s3access",
+ "awss3_region": "us-east",
+ "swift_keystone": "no",
+ "swift_container_prefix": "",
+ "swift_tenant_name": "",
+ "swift_user_name": ""
+ }
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_aws_acc_update_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'accesskeyid': 'newaccess',
+ 'secretaccesskey': 'saldhsalhdljsah',
+ 'upbandwidthmbits': 10,
+ 'downbandwidthmbits': 10,
+ 'state': 'present'
+ })
+
+ aws = IBMSVAWSS3()
+ svc_obj_info_mock.return_value = {
+ "id": "0",
+ "name": "awss3acc",
+ "type": "awss3",
+ "status": "online",
+ "mode": "normal",
+ "active_volume_count": "1",
+ "backup_volume_count": "1",
+ "import_system_id": "",
+ "import_system_name": "",
+ "error_sequence_number": "",
+ "refreshing": "no",
+ "up_bandwidth_mbits": "10",
+ "down_bandwidth_mbits": "10",
+ "backup_timestamp": "221007111148",
+ "encrypt": "no",
+ "certificate": "no",
+ "certificate_expiry": "",
+ "endpoint": "",
+ "awss3_bucket_prefix": "ansible",
+ "awss3_access_key_id": "newaccess",
+ "awss3_region": "us-west",
+ "swift_keystone": "no",
+ "swift_container_prefix": "",
+ "swift_tenant_name": "",
+ "swift_user_name": ""
+ }
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_accesskey_without_secretkey(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'accesskeyid': 'newaccess',
+ 'upbandwidthmbits': 10,
+ 'downbandwidthmbits': 10,
+ 'region': 'us-west',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "0",
+ "name": "awss3acc",
+ "type": "awss3",
+ "status": "online",
+ "mode": "normal",
+ "active_volume_count": "1",
+ "backup_volume_count": "1",
+ "import_system_id": "",
+ "import_system_name": "",
+ "error_sequence_number": "",
+ "refreshing": "no",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "backup_timestamp": "221007111148",
+ "encrypt": "no",
+ "certificate": "yes",
+ "certificate_expiry": "",
+ "endpoint": "",
+ "awss3_bucket_prefix": "ansible",
+ "awss3_access_key_id": "s3access",
+ "awss3_region": "us-east",
+ "swift_keystone": "no",
+ "swift_container_prefix": "",
+ "swift_tenant_name": "",
+ "swift_user_name": ""
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ aws = IBMSVAWSS3()
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_aws_account_validation(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'accesskeyid': 'newaccess',
+ 'upbandwidthmbits': 10,
+ 'downbandwidthmbits': 10,
+ 'region': 'us-west',
+ 'state': 'absent'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "0",
+ "name": "awss3acc",
+ "type": "awss3",
+ "status": "online",
+ "mode": "normal",
+ "active_volume_count": "1",
+ "backup_volume_count": "1",
+ "import_system_id": "",
+ "import_system_name": "",
+ "error_sequence_number": "",
+ "refreshing": "no",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "backup_timestamp": "221007111148",
+ "encrypt": "no",
+ "certificate": "yes",
+ "certificate_expiry": "",
+ "endpoint": "",
+ "awss3_bucket_prefix": "ansible",
+ "awss3_access_key_id": "s3access",
+ "awss3_region": "us-east",
+ "swift_keystone": "no",
+ "swift_container_prefix": "",
+ "swift_tenant_name": "",
+ "swift_user_name": ""
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ aws = IBMSVAWSS3()
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_aws_account(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'state': 'absent'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "0",
+ "name": "awss3acc",
+ "type": "awss3",
+ "status": "online",
+ "mode": "normal",
+ "active_volume_count": "1",
+ "backup_volume_count": "1",
+ "import_system_id": "",
+ "import_system_name": "",
+ "error_sequence_number": "",
+ "refreshing": "no",
+ "up_bandwidth_mbits": "20",
+ "down_bandwidth_mbits": "20",
+ "backup_timestamp": "221007111148",
+ "encrypt": "no",
+ "certificate": "yes",
+ "certificate_expiry": "",
+ "endpoint": "",
+ "awss3_bucket_prefix": "ansible",
+ "awss3_access_key_id": "s3access",
+ "awss3_region": "us-east",
+ "swift_keystone": "no",
+ "swift_container_prefix": "",
+ "swift_tenant_name": "",
+ "swift_user_name": ""
+ }
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws = IBMSVAWSS3()
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_aws_account_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'awss3acc',
+ 'state': 'absent'
+ })
+
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws = IBMSVAWSS3()
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py
new file mode 100644
index 000000000..7350eb6e3
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_cloud_backups.py
@@ -0,0 +1,445 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_cloud_backups """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_cloud_backups import IBMSVCloudBackup
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCloudBackup(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_missing_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_parameters_1(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'volumegroup_name': 'VG1',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_parameters_2(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'volume_UID': '8320948320948',
+ 'state': 'absent'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_parameters_3(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_UID': '8320948320948',
+ 'all': True,
+ 'generation': 1,
+ 'state': 'absent'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_invalid_parameters_delete_1(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volumegroup_name': 'VG1',
+ 'state': 'absent'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_invalid_parameters_delete_2(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'full': True,
+ 'state': 'absent'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_with_invalid_create_parameters(self, svc_authorize_mock,
+                                            svc_obj_info_mock,
+                                            svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'volume_UID': '230984093284032984',
+ 'generation': 1,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1'}
+ with pytest.raises(AnsibleFailJson) as exc:
+ aws = IBMSVCloudBackup()
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_cloud_backup_volume(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_wrap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'state': 'present'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1'}
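+        # Empty CLI output is treated as success for the backup command,
+        # so a change is reported (interpretation assumed from this expectation).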
+ svc_token_wrap_mock.return_value = {'out': None}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_cloud_backup_volume_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_wrap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'state': 'present'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1'}
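+        # CMMVC9083E in the CLI output is taken to mean the backup already exists,
+        # so no change is reported (error-code meaning assumed from this expectation).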
+ svc_token_wrap_mock.return_value = {'out': b'CMMVC9083E'}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_cloud_backup_with_invalid_volume(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_wrap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'state': 'present'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_cloud_backup_volumegroup(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_wrap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volumegroup_name': 'VG1',
+ 'state': 'present'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'VG1'}
+ svc_token_wrap_mock.return_value = {'out': None}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_cloud_backup_volumegroup_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_wrap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volumegroup_name': 'VG1',
+ 'state': 'present'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'VG1'}
+ svc_token_wrap_mock.return_value = {'out': b'CMMVC9083E'}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_cloud_backup_with_invalid_volumegroup(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_wrap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volumegroup_name': 'VG1',
+ 'state': 'present'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_cloud_backup_with_volume_name(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'generation': 1,
+ 'state': 'absent'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1'}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_cloud_backup_with_volume_name_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_name': 'vol1',
+ 'generation': 1,
+ 'state': 'absent'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_cloud_backup_with_uid(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_UID': '3280948320948',
+ 'all': True,
+ 'state': 'absent'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1'}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_cloud_backup_with_uid_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'volume_UID': '3280948320948',
+ 'all': True,
+ 'state': 'absent'
+ })
+
+ aws = IBMSVCloudBackup()
+ svc_obj_info_mock.return_value = {}
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py
new file mode 100644
index 000000000..787038fd5
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fc_partnership.py
@@ -0,0 +1,422 @@
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_awss3_cloudaccount """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_fc_partnership import IBMSVFCPartnership
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVFCPartnership(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_missing_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_system': 'cluster_A'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVFCPartnership()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_missing_mandatory_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVFCPartnership()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_fc_partnership(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'linkbandwidthmbits': 20,
+ 'backgroundcopyrate': 50,
+ 'start': True,
+ 'state': 'present'
+ })
+
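+        # The first lookup resolves the remote system id; the next two (local and
+        # remote partnership queries, order assumed) return empty, so a new
+        # partnership is created and started.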
+ svc_obj_info_mock.side_effect = [{'id': '0123456789'}, {}, {}]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_fc_partnership_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'linkbandwidthmbits': 20,
+ 'backgroundcopyrate': 50,
+ 'state': 'present'
+ })
+
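+        # Both sides already report a partnership with the requested settings,
+        # so no change is expected.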
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_fc_partnership_two_systems(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'linkbandwidthmbits': 30,
+ 'backgroundcopyrate': 60,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_fc_partnership_two_systems_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'linkbandwidthmbits': 30,
+ 'backgroundcopyrate': 60,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {'id': 0, 'link_bandwidth_mbits': '30', 'background_copy_rate': '60'},
+ {'id': 0, 'link_bandwidth_mbits': '30', 'background_copy_rate': '60'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_fc_partnership_one_system(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_system': 'cluster_A',
+ 'linkbandwidthmbits': 30,
+ 'backgroundcopyrate': 60,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_fc_partnership_one_system_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_system': 'cluster_A',
+ 'linkbandwidthmbits': 30,
+ 'backgroundcopyrate': 60,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': 0, 'link_bandwidth_mbits': '30', 'background_copy_rate': '60'},
+ {'id': 0, 'link_bandwidth_mbits': '30', 'background_copy_rate': '60'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop_fc_partnership(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'stop': True,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop_fc_partnership_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'stop': True,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_fc_partnership(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'state': 'absent'
+ })
+
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'},
+ {'id': 0, 'link_bandwidth_mbits': '20', 'background_copy_rate': '50'}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_fc_partnership_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'remote_clustername',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'remote_system': 'cluster_A',
+ 'state': 'absent'
+ })
+
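+        # The remote system still resolves, but no partnership is found on either
+        # side, so deletion is a no-op.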
+ svc_obj_info_mock.side_effect = [
+ {'id': '0123456789'},
+ {},
+ {}
+ ]
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc = IBMSVFCPartnership()
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py
new file mode 100644
index 000000000..b06bcfa35
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_fcportsetmember.py
@@ -0,0 +1,215 @@
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Sudheesh Reddy Satti <Sudheesh.Reddy.Satti@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_fcportsetmember """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_fcportsetmember import \
+ IBMSVFCPortsetmember
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVFCPortsetmember(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': '',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVFCPortsetmember()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+    def test_module_with_blank_fcportid(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'fcportid': "",
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVFCPortsetmember()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_fcportsetmember.IBMSVFCPortsetmember.add_fcportsetmember')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_add_fcportsetmember(self,
+                                 svc_authorize_mock,
+                                 svc_run_command_mock,
+                                 add_fcportsetmember_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'fcportid': '1',
+ 'state': 'present'
+ })
+ add_fcportsetmember = IBMSVFCPortsetmember()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ add_fcportsetmember.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_add_fcportsetmember_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'fcportid': '3',
+ 'state': 'present'
+ })
+
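+        # The mocked lsfcportsetmember-style entry indicates the mapping is
+        # already present, so the present request should be a no-op.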
+ svc_obj_info_mock.return_value = {
+ "id": "4",
+ "fc_io_port_id": "1",
+ "portset_id": "3",
+ "portset_name": "portset0",
+ "owner_id": "",
+ "owner_name": ""
+ }
+ p = IBMSVFCPortsetmember()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_fcportsetmember.IBMSVFCPortsetmember.remove_fcportsetmember')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_remove_fcportsetmember(self,
+                                    svc_authorize_mock,
+                                    svc_run_command_mock,
+                                    remove_fcportsetmember_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'fcportid': '1',
+ 'state': 'absent'
+ })
+ remove_fcportsetmember = IBMSVFCPortsetmember()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ remove_fcportsetmember.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_fcportsetmember.IBMSVFCPortsetmember.is_fcportsetmember_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_remove_fcportsetmember_idempotency(self, svc_authorize_mock,
+ svc_run_command_mock,
+ fcportsetmember_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'fcportid': '3',
+ 'state': 'absent'
+ })
+
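+        # An empty result from is_fcportsetmember_exists means the mapping is
+        # already gone, so the absent request should be a no-op.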
+ fcportsetmember_exist_mock.return_value = {}
+ p = IBMSVFCPortsetmember()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py
new file mode 100644
index 000000000..0a441e3c4
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ip_partnership.py
@@ -0,0 +1,797 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_ip_partnership """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_ip_partnership import IBMSVCIPPartnership
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCIPPartnership(unittest.TestCase):
+    """Group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+    def set_default_args(self):
+        return {
+            'name': 'test',
+            'state': 'present'
+        }
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCIPPartnership()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, mock_auth):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ ip = IBMSVCIPPartnership()
+ data = ip.basic_checks()
+        self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_state_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIPPartnership()
+ ip.apply()
+ print('Info: %s' % exc)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_remote_clusterip_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIPPartnership()
+ ip.apply()
+ print('Info: %s' % exc)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_ip(self, mock_auth, mock_soi):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
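+        # Mocked lssystem-style output; get_ip presumably derives the cluster
+        # IP from the console_IP field ('x.x.x.x:443' -> 'x.x.x.x').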
+ mock_soi.return_value = {
+ "id": "123456789",
+ "name": "Cluster_x.x.x.x",
+ "location": "local",
+ "partnership": "",
+ "total_mdisk_capacity": "1.7TB",
+ "space_in_mdisk_grps": "1.7TB",
+ "space_allocated_to_vdisks": "20.58GB",
+ "total_free_space": "1.7TB",
+ "total_vdiskcopy_capacity": "20.00GB",
+ "total_used_capacity": "19.02GB",
+ "total_overallocation": "1",
+ "total_vdisk_capacity": "20.00GB",
+ "total_allocated_extent_capacity": "21.00GB",
+ "statistics_status": "on",
+ "statistics_frequency": "15",
+ "cluster_locale": "",
+ "time_zone": "",
+ "code_level": "8.5.0.2 (build 157.12.2204111405000)",
+ "console_IP": "x.x.x.x:443",
+ "id_alias": "112233",
+ "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0",
+ "gm_intra_cluster_delay_simulation": "0",
+ "gm_max_host_delay": "5",
+ "email_reply": "",
+ "email_contact": "",
+ "email_contact_primary": "",
+ "email_contact_alternate": "",
+ "email_contact_location": "",
+ "email_contact2": "",
+ "email_contact2_primary": "",
+ "email_contact2_alternate": "",
+ "email_state": "stopped",
+ "inventory_mail_interval": "0",
+ "cluster_ntp_IP_address": "",
+ "cluster_isns_IP_address": "",
+ "iscsi_auth_method": "none",
+ "iscsi_chap_secret": "",
+ "auth_service_configured": "no",
+ "auth_service_enabled": "no",
+ "auth_service_url": "",
+ "auth_service_user_name": "",
+ "auth_service_pwd_set": "no",
+ "auth_service_cert_set": "no",
+ "auth_service_type": "ldap",
+ "relationship_bandwidth_limit": "25",
+ "tiers": [
+ {
+ "tier": "tier_scm",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ },
+ {
+ "tier": "tier0_flash",
+ "tier_capacity": "1.74TB",
+ "tier_free_capacity": "1.72TB"
+ },
+ {
+ "tier": "tier1_flash",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ },
+ {
+ "tier": "tier_enterprise",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ },
+ {
+ "tier": "tier_nearline",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ }
+ ],
+ "easy_tier_acceleration": "off",
+ "has_nas_key": "no",
+ "layer": "storage",
+ "rc_buffer_size": "256",
+ "compression_active": "no",
+ "compression_virtual_capacity": "0.00MB",
+ "compression_compressed_capacity": "0.00MB",
+ "compression_uncompressed_capacity": "0.00MB",
+ "cache_prefetch": "on",
+ "email_organization": "",
+ "email_machine_address": "",
+ "email_machine_city": "",
+ "email_machine_state": "XX",
+ "email_machine_zip": "",
+ "email_machine_country": "",
+ "total_drive_raw_capacity": "0",
+ "compression_destage_mode": "off",
+ "local_fc_port_mask": "1111111111111111111111111111111111111111111111111111111111111111",
+ "partner_fc_port_mask": "1111111111111111111111111111111111111111111111111111111111111111",
+ "high_temp_mode": "off",
+ "topology": "standard",
+ "topology_status": "",
+ "rc_auth_method": "none",
+ "vdisk_protection_time": "15",
+ "vdisk_protection_enabled": "yes",
+ "product_name": "IBM FlashSystem 9200",
+ "odx": "off",
+ "max_replication_delay": "0",
+ "partnership_exclusion_threshold": "315",
+ "gen1_compatibility_mode_enabled": "no",
+ "ibm_customer": "",
+ "ibm_component": "",
+ "ibm_country": "",
+ "tier_scm_compressed_data_used": "0.00MB",
+ "tier0_flash_compressed_data_used": "0.00MB",
+ "tier1_flash_compressed_data_used": "0.00MB",
+ "tier_enterprise_compressed_data_used": "0.00MB",
+ "tier_nearline_compressed_data_used": "0.00MB",
+ "total_reclaimable_capacity": "0.00MB",
+ "physical_capacity": "1.74TB",
+ "physical_free_capacity": "1.72TB",
+ "used_capacity_before_reduction": "0.00MB",
+ "used_capacity_after_reduction": "1.02GB",
+ "overhead_capacity": "18.00GB",
+ "deduplication_capacity_saving": "0.00MB",
+ "enhanced_callhome": "on",
+ "censor_callhome": "off",
+ "host_unmap": "on",
+ "backend_unmap": "on",
+ "quorum_mode": "standard",
+ "quorum_site_id": "",
+ "quorum_site_name": "",
+ "quorum_lease": "short",
+ "automatic_vdisk_analysis_enabled": "on",
+ "callhome_accepted_usage": "no",
+ "safeguarded_copy_suspended": "no"
+ }
+ restapi_local = IBMSVCRestApi(
+ self.mock_module_helper,
+ '1.2.3.4',
+ 'domain.ibm.com',
+ 'username',
+ 'password',
+ False,
+ 'test.log',
+ ''
+ )
+ ip = IBMSVCIPPartnership()
+ data = ip.get_ip(restapi_local)
+ self.assertEqual(data, 'x.x.x.x')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_get_ip(self, mock_auth, mock_soi):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ mock_soi.return_value = None
+ restapi_local = IBMSVCRestApi(
+ self.mock_module_helper,
+ '1.2.3.4',
+ 'domain.ibm.com',
+ 'username',
+ 'password',
+ False,
+ 'test.log',
+ ''
+ )
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIPPartnership()
+ ip.get_ip(restapi_local)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_all_partnership(self, mock_auth, mock_soi):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ mock_soi.return_value = [
+ {
+ "id": "1234",
+ "name": "Cluster_x.x.x.x",
+ "location": "local",
+ "partnership": "",
+ "type": "",
+ "cluster_ip": "",
+ "event_log_sequence": "",
+ "link1": "",
+ "link2": "",
+ "link1_ip_id": "",
+ "link2_ip_id": ""
+ }
+ ]
+ restapi_local = IBMSVCRestApi(
+ self.mock_module_helper,
+ '1.2.3.4',
+ 'domain.ibm.com',
+ 'username',
+ 'password',
+ False,
+ 'test.log',
+ ''
+ )
+ ip = IBMSVCIPPartnership()
+ data = ip.get_all_partnership(restapi_local)
+ self.assertEqual(data[0]['name'], 'Cluster_x.x.x.x')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_filter_partnership(self, mock_auth):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ data = [
+ {
+ "id": "1234",
+ "name": "Cluster_x.x.x.x",
+ "location": "local",
+ "partnership": "",
+ "type": "",
+ "cluster_ip": "",
+ "event_log_sequence": "",
+ "link1": "",
+ "link2": "",
+ "link1_ip_id": "",
+ "link2_ip_id": ""
+ },
+ {
+ "id": "12345",
+ "name": "Cluster_z.z.z.z",
+ "location": "remote",
+ "partnership": "",
+ "type": "",
+ "cluster_ip": "z.z.z.z",
+ "event_log_sequence": "",
+ "link1": "",
+ "link2": "",
+ "link1_ip_id": "",
+ "link2_ip_id": ""
+ }
+ ]
+ ip = IBMSVCIPPartnership()
+ data = ip.filter_partnership(data, "z.z.z.z")
+ self.assertEqual(len(data), 1)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_partnership_detail(self, mock_auth, mock_soi):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ mock_soi.return_value = {
+ "id": "1234",
+ "name": "test_Cluster_x.x.x.x",
+ "location": "remote",
+ "partnership": "not_present",
+ "code_level": "8.5.1.0 (build 159.1.2203020902000)",
+ "console_IP": "x.x.x.x:443",
+ "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0",
+ "gm_intra_cluster_delay_simulation": "0",
+ "relationship_bandwidth_limit": "25",
+ "gm_max_host_delay": "5",
+ "type": "ipv4",
+ "cluster_ip": "x.x.x.x",
+ "chap_secret": "",
+ "event_log_sequence": "",
+ "link_bandwidth_mbits": "100",
+ "background_copy_rate": "50",
+ "max_replication_delay": "0",
+ "compressed": "yes",
+ "link1": "portset1",
+ "link2": "",
+ "link1_ip_id": "1",
+ "link2_ip_id": "",
+ "secured": "no"
+ }
+ restapi_local = IBMSVCRestApi(
+ self.mock_module_helper,
+ '1.2.3.4',
+ 'domain.ibm.com',
+ 'username',
+ 'password',
+ False,
+ 'test.log',
+ ''
+ )
+ ip = IBMSVCIPPartnership()
+ data = ip.get_partnership_detail(restapi_local, '1234')
+ self.assertEqual(data['id'], '1234')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_partnership(self, mock_auth, mock_src):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ mock_src.return_value = ''
+ ip = IBMSVCIPPartnership()
+ data = ip.create_partnership('local', 'y.y.y.y')
+        self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_partnership(self, mock_auth, mock_src):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'absent',
+ 'remote_clusterip': 'y.y.y.y'
+ })
+ mock_src.return_value = ''
+ ip = IBMSVCIPPartnership()
+ data = ip.remove_partnership('local', 'y.y.y.y')
+        self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_probe_partnership(self, mock_auth):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset1',
+ 'remote_link1': 'portset1'
+ })
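+        # Both mocked partnership views report compressed as 'no' while the
+        # module arguments request 'yes'; probe_partnership is expected to
+        # flag 'compressed' for update on both the local and remote systems.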
+ local_data = {
+ "id": "1234",
+ "name": "test_Cluster_x.x.x.x",
+ "location": "remote",
+ "partnership": "not_present",
+ "code_level": "8.5.1.0 (build 159.1.2203020902000)",
+ "console_IP": "x.x.x.x:443",
+ "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0",
+ "gm_intra_cluster_delay_simulation": "0",
+ "relationship_bandwidth_limit": "25",
+ "gm_max_host_delay": "5",
+ "type": "ipv4",
+ "cluster_ip": "x.x.x.x",
+ "chap_secret": "",
+ "event_log_sequence": "",
+ "link_bandwidth_mbits": "100",
+ "background_copy_rate": "50",
+ "max_replication_delay": "0",
+ "compressed": "no",
+ "link1": "portset1",
+ "link2": "",
+ "link1_ip_id": "1",
+ "link2_ip_id": "",
+ "secured": "no"
+ }
+ remote_data = {
+ "id": "12345",
+ "name": "test_Cluster_x.x.x.x",
+ "location": "remote",
+ "partnership": "not_present",
+ "code_level": "8.5.1.0 (build 159.1.2203020902000)",
+ "console_IP": "x.x.x.x:443",
+ "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0",
+ "gm_intra_cluster_delay_simulation": "0",
+ "relationship_bandwidth_limit": "25",
+ "gm_max_host_delay": "5",
+ "type": "ipv4",
+ "cluster_ip": "x.x.x.x",
+ "chap_secret": "",
+ "event_log_sequence": "",
+ "link_bandwidth_mbits": "100",
+ "background_copy_rate": "50",
+ "max_replication_delay": "0",
+ "compressed": "no",
+ "link1": "portset1",
+ "link2": "",
+ "link1_ip_id": "1",
+ "link2_ip_id": "",
+ "secured": "no"
+ }
+ ip = IBMSVCIPPartnership()
+ local_modify, remote_modify = ip.probe_partnership(local_data, remote_data)
+ self.assertEqual(local_modify['compressed'], 'yes')
+ self.assertEqual(remote_modify['compressed'], 'yes')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_partnership(self, mock_auth, mock_src):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ restapi_local = IBMSVCRestApi(
+ self.mock_module_helper,
+ '1.2.3.4',
+ 'domain.ibm.com',
+ 'username',
+ 'password',
+ False,
+ 'test.log',
+ ''
+ )
+ mock_src.return_value = ''
+ ip = IBMSVCIPPartnership()
+ data = ip.start_partnership(restapi_local, '0000020428A03B90')
+        self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop_partnership(self, mock_auth, mock_src):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ restapi_local = IBMSVCRestApi(
+ self.mock_module_helper,
+ '1.2.3.4',
+ 'domain.ibm.com',
+ 'username',
+ 'password',
+ False,
+ 'test.log',
+ ''
+ )
+ mock_src.return_value = ''
+ ip = IBMSVCIPPartnership()
+ data = ip.stop_partnership(restapi_local, '1234')
+        self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_partnership(self, mock_auth, mock_src):
+ set_module_args({
+ 'clustername': 'x.x.x.x',
+ 'domain': '',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'y.y.y.y',
+ 'remote_domain': '',
+            'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'log_path': 'playbook.log',
+ 'state': 'present',
+ 'remote_clusterip': 'y.y.y.y',
+ 'type': 'ipv4',
+ 'linkbandwidthmbits': 100,
+ 'backgroundcopyrate': 50,
+ 'compressed': 'yes',
+ 'link1': 'portset2',
+ 'remote_link1': 'portset1'
+ })
+ mock_src.return_value = ''
+ modify_local = {
+ 'linkbandwidthmbits': '101',
+ 'backgroundcopyrate': '52'
+ }
+ ip = IBMSVCIPPartnership()
+ data = ip.update_partnership('local', '1234', modify_local)
+        self.assertIsNone(data)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py
new file mode 100644
index 000000000..8b5e701c6
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_provisioning_policy.py
@@ -0,0 +1,370 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_provisioning_policy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_provisioning_policy import IBMSVProvisioningPolicy
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVProvisioningPolicy(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': ''
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVProvisioningPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_without_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVProvisioningPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_without_mandatory_parameter(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ 'state': 'present'
+ })
+
+ pp_exists_mock.return_value = {}
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ pp.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_provisioning_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ 'capacitysaving': 'drivebased',
+ 'state': 'present'
+ })
+
+ pp_exists_mock.return_value = {}
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ pp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_provisioning_policy_idempotency(self,
+ svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ 'capacitysaving': 'drivebased',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {'id': 0, 'name': 'pp0', 'capacity_saving': 'none'}
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ pp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_provisioning_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp_new',
+ 'old_name': 'pp0',
+ 'state': 'present'
+ })
+
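+        # The successive is_pp_exists lookups presumably resolve as: old_name
+        # 'pp0' still exists, the new name is free, and a final fetch returns
+        # the policy to rename, so apply() should report a change.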
+ pp_exists_mock.side_effect = iter([
+ {'id': 0, 'name': 'pp0'},
+ {},
+ {'id': 0, 'name': 'pp0'}
+ ])
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ pp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_provisioning_policy_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp_new',
+ 'old_name': 'pp0',
+ 'state': 'present'
+ })
+
+ pp_exists_mock.side_effect = iter([{}, {'id': 0, 'name': 'pp0'}, {}])
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ pp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_with_capacitysaving(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp_new',
+ 'old_name': 'pp0',
+ 'capacitysaving': 'drivebased',
+ 'state': 'present'
+ })
+
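+        # capacity_saving differs from the requested value; a provisioning
+        # policy presumably cannot be altered after creation, so apply()
+        # is expected to fail rather than update in place.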
+ svc_obj_info_mock.return_value = {'id': 0, 'name': 'pp0', 'capacity_saving': 'none'}
+ pp = IBMSVProvisioningPolicy()
+ with pytest.raises(AnsibleFailJson) as exc:
+ pp.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_with_deduplicated(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp_new',
+ 'old_name': 'pp0',
+ 'deduplicated': True,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {'id': 0, 'name': 'pp0', 'deduplicated': 'no'}
+ pp = IBMSVProvisioningPolicy()
+ with pytest.raises(AnsibleFailJson) as exc:
+ pp.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_provisioning_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ 'state': 'absent'
+ })
+
+ pp_exists_mock.return_value = {'id': 0, 'name': 'pp0'}
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ pp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_provisioning_policy_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ 'state': 'absent'
+ })
+
+ pp_exists_mock.return_value = {}
+ pp = IBMSVProvisioningPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ pp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_provisioning_policy.IBMSVProvisioningPolicy.is_pp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_provisioning_policy_validation(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ pp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'pp0',
+ 'capacitysaving': 'drivebased',
+ 'state': 'absent'
+ })
+
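+        # Supplying the creation-only capacitysaving parameter together with
+        # state=absent is presumably rejected during argument validation in
+        # the constructor, before is_pp_exists is ever consulted.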
+ pp_exists_mock.return_value = {}
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVProvisioningPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py
new file mode 100644
index 000000000..c3f0e0535
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_replication_policy.py
@@ -0,0 +1,348 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_replication_policy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_replication_policy import IBMSVReplicationPolicy
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVReplicationPolicy(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': ''
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVReplicationPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_without_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVReplicationPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_replication_policy.IBMSVReplicationPolicy.is_rp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_without_mandatory_parameter(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ rp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'state': 'present'
+ })
+
+ rp_exists_mock.return_value = {}
+ svc_run_command_mock.side_effect = fail_json
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ rp.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_replication_policy.IBMSVReplicationPolicy.is_rp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_replication_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ rp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'topology': '2-site-async-dr',
+ 'location1system': 'cluster_A',
+ 'location1iogrp': 0,
+ 'location2system': 'cluster_B',
+ 'location2iogrp': 0,
+ 'rpoalert': 60,
+ 'state': 'present'
+ })
+
+ rp_exists_mock.return_value = {}
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ rp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_replication_policy_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'topology': '2-site-async-dr',
+ 'location1system': 'cluster_A',
+ 'location1iogrp': 0,
+ 'location2system': 'cluster_B',
+ 'location2iogrp': 0,
+ 'rpoalert': 60,
+ 'state': 'present'
+ })
+
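+        # The mocked policy matches every requested attribute (topology,
+        # locations, I/O groups, rpoalert), so no change is expected.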
+ svc_obj_info_mock.return_value = {
+ 'id': 0,
+ 'name': 'rp0',
+ 'topology': '2-site-async-dr',
+ 'location1_system_name': 'cluster_A',
+ 'location1_iogrp_id': '0',
+ 'location2_system_name': 'cluster_B',
+ 'location2_iogrp_id': '0',
+ 'rpo_alert': '60',
+ }
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ rp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_replication_policy.IBMSVReplicationPolicy.is_rp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_ha_replication_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ rp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'topology': '2-site-ha',
+ 'location1system': 'cluster_A',
+ 'location1iogrp': 0,
+ 'location2system': 'cluster_B',
+ 'location2iogrp': 0,
+ 'state': 'present'
+ })
+
+ rp_exists_mock.return_value = {}
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ rp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_replication_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'location1system': 'cluster_C',
+ 'state': 'present'
+ })
+
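+        # location1_system_name differs from the requested cluster_C; a
+        # replication policy presumably cannot be modified after creation,
+        # so apply() is expected to fail.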
+ svc_obj_info_mock.return_value = {
+ 'id': 0,
+ 'name': 'rp0',
+ 'location1_system_name': 'cluster_A'
+ }
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ rp.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_replication_policy.IBMSVReplicationPolicy.is_rp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_replication_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ rp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'state': 'absent'
+ })
+
+ rp_exists_mock.return_value = {
+ 'id': 0,
+ 'name': 'rp0'
+ }
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ rp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_replication_policy.IBMSVReplicationPolicy.is_rp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_replication_policy_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ rp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'state': 'absent'
+ })
+
+ rp_exists_mock.return_value = {}
+ rp = IBMSVReplicationPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ rp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_replication_policy.IBMSVReplicationPolicy.is_rp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_replication_policy_validation(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ rp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'rp0',
+ 'topology': '2-site-async-dr',
+ 'location1system': 'cluster_A',
+            'location1iogrp': 0,
+            'location2system': 'cluster_B',
+            'location2iogrp': 0,
+ 'rpoalert': 60,
+ 'state': 'absent'
+ })
+
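+        # Creation-only parameters combined with state=absent are presumably
+        # rejected during argument validation in the constructor itself,
+        # even though the policy exists.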
+ rp_exists_mock.return_value = {'id': 0, 'name': 'rp0'}
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVReplicationPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_security.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_security.py
new file mode 100644
index 000000000..1a8e17dd9
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_security.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Sumit Kumar Gupta <sumit.gupta16@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_security """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+import random
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_security import IBMSVSecurityMgmt
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVSecurityMgmt(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ def test_change_security_settings(self,
+ svc_run_command_mock,
+ svc_authorize_mock):
+        '''
+        Generate a random value for each parameter within its valid range and
+        verify that chsecurity changes the settings.
+        '''
+        # Note: fixed values would not be a reliable test; from the second
+        # run onwards the settings could already match, so the test might
+        # pass without actually changing anything.
+ ssh_grace_time_seconds = random.randint(1000, 1800)
+ clitimeout = random.randint(60, 120)
+ guitimeout = random.randint(60, 120)
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'sshgracetime': ssh_grace_time_seconds,
+ 'guitimeout': guitimeout,
+ 'clitimeout': clitimeout
+ })
+
+ rp = IBMSVSecurityMgmt()
+ svc_run_command_mock.return_value = ""
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ rp.apply()
+ print(exc.value.args[0])
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py
new file mode 100644
index 000000000..81673d358
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot.py
@@ -0,0 +1,793 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_snapshot """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_snapshot import IBMSVSnapshot
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
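+
+
+# A minimal usage sketch of this harness (hypothetical argument values, for
+# illustration only): set the module args, run apply(), and read the payload
+# captured from the patched exit_json()/fail_json():
+#
+#     set_module_args({...})                    # required module arguments
+#     with pytest.raises(AnsibleExitJson) as exc:
+#         IBMSVSnapshot().apply()
+#     result = exc.value.args[0]                # dict passed to exit_json()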
+
+
+class TestIBMSVSnapshot(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': ''
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSnapshot()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_without_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSnapshot()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_case(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'src_volume_names': 'vol0:vol1',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSnapshot()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_snapshot_validation(self, svc_authorize_mock, snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'old_name': 'snapshot0',
+ 'state': 'present',
+ })
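+        # 'name' and 'old_name' are identical here; apply() is expected to
+        # reject this rename request.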
+ snapshot_exists_mock.return_value = True
+ ss = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ ss.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_snapshot_validation_1(self, svc_authorize_mock,
+ lsvolumegroupsnapshot_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'ownershipgroup': 'owner0',
+ 'safeguarded': True,
+ 'retentiondays': 5,
+ 'state': 'present',
+ })
+ snapshot_exists_mock.return_value = True
+ lsvolumegroupsnapshot_mock.return_value = {'owner_name': '', 'safeguarded': 'yes'}
+ ss = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ ss.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_snapshot_validation(self, svc_authorize_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'ownershipgroup': 'owner0',
+ 'state': 'present',
+ })
+ snapshot_exists_mock.return_value = False
+ ss = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ ss.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_snapshot_validation_2(self, svc_authorize_mock,
+ lsvolumegroupsnapshot_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'snapshot_pool': 'childpool0',
+ 'ownershipgroup': 'owner0',
+ 'state': 'present',
+ })
+ snapshot_exists_mock.return_value = True
+ lsvolumegroupsnapshot_mock.return_value = {'owner_name': ''}
+ ss = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ ss.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_snapshot(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'state': 'present',
+ })
+
+ snapshot_exists_mock.return_value = {}
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_snapshot_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ lsvg_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'state': 'present',
+ })
+
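+        # The existing snapshot already matches the requested state, so
+        # apply() should report changed=False.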
+ snapshot_exists_mock.return_value = {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ }
+
+ lsvg_mock.return_value = {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ }
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_safeguarded_volumegroup_snapshot(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'state': 'present',
+ 'safeguarded': True,
+ 'retentiondays': 2
+ })
+
+ snapshot_exists_mock.return_value = {}
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_snapshot_transient(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ # Create transient snapshots
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'state': 'present',
+ 'retentionminutes': 5
+ })
+
+ snapshot_exists_mock.return_value = {}
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_safeguarded_volumegroup_snapshot_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ lsvg_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'state': 'present',
+ 'safeguarded': True,
+ 'retentiondays': 2
+ })
+
+ snapshot_exists_mock.return_value = {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": '',
+ 'safeguarded': 'yes'
+ }
+
+ lsvg_mock.return_value = {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": '',
+ 'safeguarded': 'yes'
+ }
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volume_snapshot(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volume_names': 'vol0:vol1',
+ 'state': 'present',
+ })
+
+ snapshot_exists_mock.return_value = {}
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volume_snapshot_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ lsvg_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volume_names': 'vol0:vol1',
+ 'state': 'present',
+ })
+
+ snapshot_exists_mock.return_value = {
+ 'id': 1,
+ 'snapshot_name': 'snapshot0'
+ }
+
+ lsvg_mock.return_value = {
+ 'id': 1,
+ 'snapshot_name': 'snapshot0'
+ }
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_safeguarded_volume_snapshot(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volume_names': 'vol0:vol1',
+ 'state': 'present',
+ 'safeguarded': True,
+ 'retentiondays': 2
+ })
+
+ snapshot_exists_mock.return_value = {}
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_safeguarded_volume_snapshot_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ lsvg_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'src_volume_names': 'vol0:vol1',
+ 'state': 'present',
+ 'safeguarded': True,
+ 'retentiondays': 2
+ })
+
+ snapshot_exists_mock.return_value = {
+ 'id': 1,
+ 'snapshot_name': 'snapshot0',
+ 'safeguarded': 'yes'
+ }
+
+ lsvg_mock.return_value = {
+ 'id': 1,
+ 'snapshot_name': 'snapshot0',
+ 'safeguarded': 'yes'
+ }
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.snapshot_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_snapshot_name_ownership(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_probe_mock,
+ lsvg_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snap0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'old_name': 'snapshot0',
+ 'ownershipgroup': 'owner0',
+ 'state': 'present',
+ })
+
+ snapshot_probe_mock.return_value = ['name', 'ownershipgroup']
+
+ snapshot_exists_mock.return_value = {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ }
+
+ lsvg_mock.return_value = {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ }
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.lsvolumegroupsnapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_snapshot_ownership_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ lsvg_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snap0',
+ 'src_volumegroup_name': 'volgrp0',
+ 'ownershipgroup': 'owner0',
+ 'state': 'present',
+ })
+
+ snapshot_exists_mock.return_value = {
+ "id": '0',
+ "name": 'snap0',
+ "owner_name": 'owner0'
+ }
+
+ lsvg_mock.return_value = {
+ "id": '0',
+ "name": 'snap0',
+ "owner_name": 'owner0'
+ }
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_restore_snapshot_volumegroup(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'state': 'restore',
+ 'src_volumegroup_name': 'volumegroup0'
+ })
+
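+        # side_effect yields one item per call: the first existence check
+        # finds the snapshot; the next call returns an empty result.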
+ snapshot_exists_mock.side_effect = iter([
+ {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ },
+ {}
+ ])
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_restore_snapshot_volume(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'state': 'restore',
+ 'src_volumegroup_name': 'volumegroup0',
+ 'src_volume_names': "vol0:vol1"
+ })
+
+ snapshot_exists_mock.side_effect = iter([
+ {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ },
+ {}
+ ])
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_snapshot(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'state': 'absent',
+ })
+
+ snapshot_exists_mock.side_effect = iter([
+ {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ },
+ {}
+ ])
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_snapshot_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'state': 'absent',
+ })
+
+ snapshot_exists_mock.return_value = {}
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshot.IBMSVSnapshot.is_snapshot_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_dependent_snapshot(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ snapshot_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshot0',
+ 'state': 'absent',
+ })
+
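+        # Both existence checks still find the snapshot, exercising the
+        # dependent-snapshot deletion path.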
+ snapshot_exists_mock.side_effect = iter([
+ {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ },
+ {
+ "id": '0',
+ "name": 'snapshot0',
+ "owner_name": ''
+ }
+ ])
+
+ fc = IBMSVSnapshot()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ fc.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py
new file mode 100644
index 000000000..9398ea56b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_snapshot_policy.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi1.jain@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_snapshotpolicy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_snapshotpolicy import IBMSVCSnapshotPolicy
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCSnapshotPolicy(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': ''
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSnapshotPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_without_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '2'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSnapshotPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_snapshot_policy(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'present'
+ })
+
+ sp_exists_mock.return_value = {}
+
+ sp = IBMSVCSnapshotPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_snapshotpolicy_idempotency(self, svc_authorize_mock,
+ svc_run_command_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'present'
+ })
+
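+        # svc_obj_info reports a policy whose attributes already match the
+        # requested ones, so no change is expected.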
+ svc_obj_info_mock.return_value = {
+ "policy_id": "3",
+ "policy_name": "snapshotpolicy0",
+ "schedule_id": "1",
+ "backup_unit": "day",
+ "backup_interval": "1",
+ "backup_start_time": "210228180000",
+ "retention_days": "10"
+ }
+
+ sp = IBMSVCSnapshotPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_snapshot_policy_failure(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'absent'
+ })
+
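+        # Creation-only parameters (backupunit, backupinterval, ...) combined
+        # with state=absent should fail module argument validation.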
+ sp_exists_mock.return_value = {
+ "policy_id": "3",
+ "policy_name": "snapshotpolicy0",
+ "schedule_id": "1",
+ "backup_unit": "day",
+ "backup_interval": "1",
+ "backup_start_time": "210228180000",
+ "retention_days": "10"
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSnapshotPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_snapshot_policy(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy0',
+ 'state': 'absent'
+ })
+
+ sp_exists_mock.return_value = {
+ "policy_id": "3",
+ "policy_name": "snapshotpolicy0",
+ "schedule_id": "1",
+ "backup_unit": "day",
+ "backup_interval": "1",
+ "backup_start_time": "210228180000",
+ "retention_days": "10"
+ }
+
+ sp = IBMSVCSnapshotPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_snapshotpolicy_idempotency(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy0',
+ 'state': 'absent'
+ })
+
+ sp_exists_mock.return_value = {}
+
+ sp = IBMSVCSnapshotPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sp.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_suspend_snapshotpolicy_failure(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'snapshotpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'suspend'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSnapshotPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_suspend_snapshotpolicy(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'suspend'
+ })
+
+ sp = IBMSVCSnapshotPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_snapshotpolicy.IBMSVCSnapshotPolicy.policy_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_resume_snapshotpolicy(self, svc_authorize_mock,
+ svc_run_command_mock, sp_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'resume'
+ })
+
+ sp = IBMSVCSnapshotPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sp.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py
new file mode 100644
index 000000000..3f4a86ee6
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_ssl_certificate.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_ssl_certificate """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_ssl_certificate import IBMSVSSLCertificate
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVSSLCertificate(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_export_certificate(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'certificate_type': 'system'
+ })
+
+ cert = IBMSVSSLCertificate()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ cert.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+ self.assertEqual(svc_run_command_mock.call_count, 1)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_storage_partition.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_storage_partition.py
new file mode 100644
index 000000000..147882236
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_storage_partition.py
@@ -0,0 +1,392 @@
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Shilpi Jain<shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_storage_partition """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_storage_partition import IBMSVStoragePartition
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVStoragePartition(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': '',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVStoragePartition()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_invalid_params_while_creation(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'deletenonpreferredmanagementobjects': 'True',
+ 'deletepreferredmanagementobjects': 'True',
+ 'state': 'present'
+ })
+
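+        # These delete* flags are not valid when creating a partition, so
+        # module initialization should fail.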
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVStoragePartition()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_storage_partition.IBMSVStoragePartition.get_storage_partition_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_storage_partition_success(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'state': 'present'
+ })
+
+ server_exist_mock.return_value = {}
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_storage_partition.IBMSVStoragePartition.get_storage_partition_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_storage_partition_with_optional_params(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'state': 'present',
+ 'replicationpolicy': 'policy0'
+ })
+
+ server_exist_mock.return_value = {}
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_storage_partition_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "1",
+ "name": "partition1",
+ "preferred_management_system_name": "",
+ "active_management_system_name": "",
+ "replication_policy_name": "",
+ "replication_policy_id": "",
+ "location1_system_name": "",
+ "location1_status": "",
+ "location2_system_name": "",
+ "location2_status": "",
+ "host_count": "0",
+ "host_offline_count": "0",
+ "volume_group_count": "0",
+ "ha_status": "",
+ "link_status": ""
+ }
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_storage_partition(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'preferredmanagementsystem': 'system1',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "1",
+ "name": "partition1",
+ "preferred_management_system_name": "",
+ "active_management_system_name": "",
+ "replication_policy_name": "",
+ "replication_policy_id": "",
+ "location1_system_name": "",
+ "location1_status": "",
+ "location2_system_name": "",
+ "location2_status": "",
+ "host_count": "0",
+ "host_offline_count": "0",
+ "volume_group_count": "0",
+ "ha_status": "",
+ "link_status": ""
+ }
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_replication_policy_storage_partition(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'noreplicationpolicy': 'True',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "1",
+ "name": "partition1",
+ "preferred_management_system_name": "",
+ "active_management_system_name": "",
+ "replication_policy_name": "policy0",
+ "location1_system_name": "",
+ "location1_status": "",
+ "location2_system_name": "",
+ "location2_status": "",
+ "host_count": "0",
+ "host_offline_count": "0",
+ "volume_group_count": "0",
+ "ha_status": "",
+ "link_status": ""
+ }
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_storage_partition.IBMSVStoragePartition.get_storage_partition_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_storage_partition_with_invalid_param(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'replicationpolicy': 'policy1',
+ 'state': 'absent'
+ })
+
+ server_exist_mock.return_value = {
+ "id": "1",
+ "name": "partition1",
+ "preferred_management_system_name": "",
+ "active_management_system_name": "",
+ "replication_policy_name": "",
+ "replication_policy_id": "",
+ "location1_system_name": "",
+ "location1_status": "",
+ "location2_system_name": "",
+ "location2_status": "",
+ "host_count": "0",
+ "host_offline_count": "0",
+ "volume_group_count": "0",
+ "ha_status": "",
+ "link_status": ""
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVStoragePartition()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_storage_partition.IBMSVStoragePartition.get_storage_partition_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_storage_partition(self, svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'state': 'absent'
+ })
+
+ server_exist_mock.return_value = {
+ "id": "1",
+ "name": "partition1",
+ "preferred_management_system_name": "",
+ "active_management_system_name": "",
+ "replication_policy_name": "",
+ "replication_policy_id": "",
+ "location1_system_name": "",
+ "location1_status": "",
+ "location2_system_name": "",
+ "location2_status": "",
+ "host_count": "0",
+ "host_offline_count": "0",
+ "volume_group_count": "0",
+ "ha_status": "",
+ "link_status": ""
+ }
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_storage_partition.IBMSVStoragePartition.get_storage_partition_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_storage_partition_idempotency(self, svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'partition1',
+ 'state': 'absent'
+ })
+
+ server_exist_mock.return_value = {}
+ p = IBMSVStoragePartition()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_syslog_server.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_syslog_server.py
new file mode 100644
index 000000000..31f3d9f1b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_syslog_server.py
@@ -0,0 +1,396 @@
+# Copyright (C) 2023 IBM CORPORATION
+# Author(s): Shilpi Jain<shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_syslog_server """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_syslog_server import IBMSVSyslogserver
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVSyslogserver(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': '',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSyslogserver()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_case_1(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server1',
+ 'cadf': 'on',
+ 'facility': 1,
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSyslogserver()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_case_2(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server1',
+ 'port': '1010',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSyslogserver()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_syslog_server.IBMSVSyslogserver.get_syslog_server_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_syslog_server_success(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server0',
+ 'ip': '1.1.1.1',
+ 'state': 'present'
+ })
+
+ server_exist_mock.return_value = {}
+ p = IBMSVSyslogserver()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_syslog_server.IBMSVSyslogserver.get_syslog_server_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_syslog_server_with_optional_params(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server',
+ 'state': 'present',
+ 'ip': '1.1.1.1',
+ 'info': 'off',
+ 'warning': 'off'
+ })
+
+ server_exist_mock.return_value = {}
+ p = IBMSVSyslogserver()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_syslog_server_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server0',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "1",
+ "name": "server0",
+ "IP_address": "1.1.1.1",
+ "error": "on",
+ "warning": "on",
+ "info": "on",
+ "cadf": "off",
+ "audit": "off",
+ "login": "off",
+ "facility": "0",
+ "protocol": "udp",
+ "port": "514"
+ }
+ p = IBMSVSyslogserver()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_syslog_server(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server0',
+ 'ip': '1.1.1.1',
+ 'info': 'off',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "1",
+ "name": "server0",
+ "IP_address": "1.1.1.1",
+ "error": "on",
+ "warning": "on",
+ "info": "on",
+ "cadf": "off",
+ "audit": "off",
+ "login": "off",
+ "facility": "0",
+ "protocol": "udp",
+ "port": "514"
+ }
+ p = IBMSVSyslogserver()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_syslog_server(self, svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'new_server0',
+ 'old_name': 'server0',
+ 'state': 'present'
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "1",
+ "name": "server0",
+ "IP_address": "1.1.1.1",
+ "error": "on",
+ "warning": "on",
+ "info": "on",
+ "cadf": "off",
+ "audit": "off",
+ "login": "off",
+ "facility": "0",
+ "protocol": "udp",
+ "port": "514"
+ }
+
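+ # rename_server() is exercised directly here; the expected string below
+ # mirrors the module's message verbatim (including "rename" rather than
+ # "renamed"), since the assertion is a literal match.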
+ arg_data = []
+ v = IBMSVSyslogserver()
+ data = v.rename_server(arg_data)
+ self.assertEqual(data, 'Syslog server [server0] has been successfully rename to [new_server0].')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_syslog_server.IBMSVSyslogserver.get_syslog_server_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_syslog_server_with_invalid_param(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server0',
+ 'ip': '1.1.1.1',
+ 'error': 'off',
+ 'state': 'absent'
+ })
+
+ server_exist_mock.return_value = {
+ "id": "1",
+ "name": "server0",
+ "IP_address": "1.1.1.1",
+ "error": "on",
+ "warning": "on",
+ "info": "on",
+ "cadf": "off",
+ "audit": "off",
+ "login": "off",
+ "facility": "0",
+ "protocol": "udp",
+ "port": "514"
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVSyslogserver()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_syslog_server.IBMSVSyslogserver.get_syslog_server_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_syslog_server(self, svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server0',
+ 'state': 'absent'
+ })
+
+ server_exist_mock.return_value = {
+ "id": "1",
+ "name": "server0",
+ "IP_address": "1.1.1.1",
+ "error": "on",
+ "warning": "on",
+ "info": "on",
+ "cadf": "off",
+ "audit": "off",
+ "login": "off",
+ "facility": "0",
+ "protocol": "udp",
+ "port": "514"
+ }
+ p = IBMSVSyslogserver()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_manage_syslog_server.IBMSVSyslogserver.get_syslog_server_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_syslog_server_idempotency(self, svc_authorize_mock,
+ svc_run_command_mock,
+ server_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'server0',
+ 'state': 'absent'
+ })
+
+ server_exist_mock.return_value = {}
+ p = IBMSVSyslogserver()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py
new file mode 100644
index 000000000..e795b37d5
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_manage_truststore_for_replication.py
@@ -0,0 +1,322 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_manage_truststore_for_replication """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch, Mock
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_manage_truststore_for_replication import (
+ IBMSVTrustStore
+)
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
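+
+ # A minimal usage sketch (the argument values here are hypothetical): a
+ # test calls, e.g.,
+ #   set_module_args({'clustername': 'c1', 'username': 'u', 'password': 'p'})
+ # and AnsibleModule then picks the serialized arguments up from
+ # basic._ANSIBLE_ARGS instead of reading them from stdin.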
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
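+
+ # setUp() below patches these two functions over the real AnsibleModule
+ # exit_json/fail_json, so a module run ends in a catchable exception rather
+ # than sys.exit(); each test wraps the run in pytest.raises and inspects
+ # exc.value.args[0] for 'changed', 'failed', or 'msg'.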
+
+
+class TestIBMSVTrustStore(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_mandatory_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVTrustStore()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_create_truststore_with_name(self, svc_connect_mock, ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansi_store',
+ 'remote_clustername': 'x.x.x.x',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'state': 'present'
+ })
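+ # The truststore module drives the array over SSH: exec_command is mocked
+ # to hand back (stdin, stdout, stderr) handles, stdout supplies a JSON
+ # document for the existence lookup (br'{}' below meaning no matching
+ # truststore), and exit status 0 marks the command as successful.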
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+ self.assertTrue('ansi_store' in exc.value.args[0]['msg'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_create_truststore_with_name_idempotency(self,
+ svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansi_store',
+ 'remote_clustername': 'x.x.x.x',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'state': 'present'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{"name":"ansi_store"}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_create_truststore_without_name(self, svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'x.x.x.x',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'state': 'present'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+ self.assertTrue('store_x.x.x.x' in exc.value.args[0]['msg'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_create_truststore_without_name_idempotency(self, svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'x.x.x.x',
+ 'remote_username': 'remote_username',
+ 'remote_password': 'remote_password',
+ 'state': 'present'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{"name": "store_x.x.x.x"}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_delete_truststore_with_name(self, svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansi_store',
+ 'remote_clustername': 'x.x.x.x',
+ 'state': 'absent'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{"name": "ansi_store"}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_delete_truststore_with_name_idempotency(self, svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'x.x.x.x',
+ 'state': 'absent'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_delete_truststore_without_name(self, svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'x.x.x.x',
+ 'state': 'absent'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{"name": "store_x.x.x.x"}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+ self.assertTrue('store_x.x.x.x' in exc.value.args[0]['msg'])
+
+ @patch('ansible.module_utils.compat.paramiko.paramiko.SSHClient')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.'
+ 'module_utils.ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_module_delete_truststore_without_name_idempotency(self, svc_connect_mock,
+ ssh_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote_clustername': 'x.x.x.x',
+ 'state': 'absent'
+ })
+ con_mock = Mock()
+ svc_connect_mock.return_value = True
+ ssh_mock.return_value = con_mock
+ stdin = Mock()
+ stdout = Mock()
+ stderr = Mock()
+ con_mock.exec_command.return_value = (stdin, stdout, stderr)
+ stdout.read.side_effect = iter([br'{}', b'', b''])
+ stdout.channel.recv_exit_status.return_value = 0
+
+ ts = IBMSVTrustStore()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ ts.apply()
+
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py
new file mode 100644
index 000000000..68920caeb
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_restore_cloud_backup.py
@@ -0,0 +1,204 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_restore_cloud_backup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_restore_cloud_backup import IBMSVRestoreCloudBackup
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVRestoreCloudBackup(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
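+ # With _svc_authorize mocked (see the decorator), constructing
+ # IBMSVCRestApi here never contacts a real array; the instance only
+ # serves as a fixture for the tests below.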
+
+ def test_missing_mandatory_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVRestoreCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_cancel_with_invalid_parameters(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cancel': 'true',
+ 'source_volume_uid': '83094832040980',
+ 'generation': 1,
+ 'target_volume_name': 'vol1'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVRestoreCloudBackup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_restore_volume(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'source_volume_uid': '83094832040980',
+ 'generation': 1,
+ 'target_volume_name': 'vol1'
+ })
+
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'volume_backup'}
+ svc_token_mock.return_value = {'out': ''}
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws = IBMSVRestoreCloudBackup()
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_restore_volume_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'source_volume_uid': '83094832040980',
+ 'generation': 1,
+ 'target_volume_name': 'vol1'
+ })
+
+ aws = IBMSVRestoreCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'volume_backup'}
+ svc_token_mock.return_value = {'out': b'CMMVC9103E'}
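+ # A CMMVC9103E code in the command output is taken by the module to mean
+ # the restore has already happened, so apply() reports no change.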
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_cancel_restore(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cancel': True,
+ 'target_volume_name': 'vol1'
+ })
+
+ aws = IBMSVRestoreCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1', 'restore_status': 'restoring'}
+ svc_token_mock.return_value = {'out': ''}
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_cancel_restore_idempotency(self, svc_authorize_mock,
+ svc_obj_info_mock,
+ svc_token_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cancel': True,
+ 'target_volume_name': 'vol1'
+ })
+
+ aws = IBMSVRestoreCloudBackup()
+ svc_obj_info_mock.return_value = {'id': 1, 'name': 'vol1', 'restore_status': 'available'}
+ svc_token_mock.return_value = {'out': ''}
+ with pytest.raises(AnsibleExitJson) as exc:
+ aws.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py
new file mode 100644
index 000000000..b3e0b67e8
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_sv_switch_replication_direction.py
@@ -0,0 +1,144 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_sv_switch_replication_direction """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_sv_switch_replication_direction import IBMSVSwitchReplication
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVSwitchReplication(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'mode': 'independent'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVSwitchReplication()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_mode(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'mode': 'independent',
+ })
+ svc_run_command_mock.return_value = ''
+ obj = IBMSVSwitchReplication()
+ return_data = obj.change_vg_mode()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_switch_replication_direction.IBMSVSwitchReplication.get_volumegroup_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_nonexistent_volumegroup(self, svc_authorize_mock, svc_run_command_mock, get_volumegroup_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'mode': 'independent',
+ })
+ get_volumegroup_info_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVSwitchReplication()
+ obj.apply()
+ self.assertEqual('Volume group does not exist: [test_name]', exc.value.args[0]['msg'])
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_sv_switch_replication_direction.IBMSVSwitchReplication.change_vg_mode')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_for_failure_with_unsupported_state(self, svc_authorize_mock, svc_run_command_mock, change_vg_mode_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'wrong_state',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVSwitchReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["failed"])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_auth.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_auth.py
new file mode 100644
index 000000000..b38d7df4d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_auth.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_auth """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_auth import IBMSVCauth, main
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCauth(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCauth()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.get_auth_token')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_token_returned_successfully(self, svc_authorize_mock, get_auth_token_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ svc_authorize_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ main()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.get_auth_token')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_token_return_failure(self, svc_authorize_mock, get_auth_token_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ svc_authorize_mock.return_value = 'a2ca1d31d663ce181b955c07f51a000c2f75835b3d87735d1f334cf4b913880c'
+ with pytest.raises(AnsibleFailJson) as exc:
+ main()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py
new file mode 100644
index 000000000..0519d1c8e
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_complete_initial_setup.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_complete_intial_setup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils.compat.paramiko import paramiko
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_complete_initial_setup import IBMSVCCompleteSetup
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCInitS(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ })
+
+ def test_ssh_connect_with_missing_username(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'password': 'password',
+ })
+ IBMSVCCompleteSetup()
+ print('Info: %s' % exc.value.args[0]['msg'])
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ def test_ssh_connect_with_missing_password(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ })
+ IBMSVCCompleteSetup()
+ print('Info: %s' % exc.value.args[0]['msg'])
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_complete_initial_setup.IBMSVCCompleteSetup.is_lmc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_disconnect')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_setup_with_lmc(self, connect_mock, disconnect_mock, lmc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password'
+ })
+ lmc_mock.return_value = True
+ patch.object(paramiko.SSHClient, 'exec_command')  # note: this patcher is never started, so it is inert; _svc_connect is already mocked
+ conn = IBMSVCCompleteSetup()
+ with pytest.raises(Exception) as exc:
+ conn.apply()
+ print('Info: %s' % exc)
+ self.assertTrue(exc)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_complete_initial_setup.IBMSVCCompleteSetup.is_lmc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_disconnect')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_setup_without_lmc(self, connect_mock, disconnect_mock, lmc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password'
+ })
+ lmc_mock.return_value = False
+ patch.object(paramiko.SSHClient, 'exec_command')  # note: this patcher is never started, so it is inert; _svc_connect is already mocked
+ conn = IBMSVCCompleteSetup()
+ with pytest.raises(Exception) as exc:
+ conn.apply()
+ print('Info: %s' % exc)
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_host.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_host.py
new file mode 100644
index 000000000..8d4d56e4f
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_host.py
@@ -0,0 +1,682 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+# Sudheesh Reddy Satti<Sudheesh.Reddy.Satti@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_host """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_host import IBMSVChost
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVChost(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+ self.existing_fcwwpn = []
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVChost()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_host(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ })
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "scsi", "owner_id": "",
+ "owner_name": ""}]
+ svc_obj_info_mock.return_value = host_ret
+ host = IBMSVChost().get_existing_host('ansible_host')
+ self.assertEqual('ansible_host', host['name'])
+ self.assertEqual('1', host['id'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_create_get_existing_host_called(self, svc_authorize_mock,
+ get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_host',
+ })
+ get_existing_host_mock.return_value = [1]
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_host_but_host_existed(self, svc_authorize_mock,
+ host_probe_mock,
+ get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ })
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "scsi", "owner_id": "",
+ "owner_name": ""}]
+ get_existing_host_mock.return_value = host_ret
+ host_probe_mock.return_value = []
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_host_successfully(self, svc_authorize_mock,
+ host_create_mock,
+ get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ 'fcwwpn': '100000109B570216'
+ })
+ host = {u'message': u'Host, id [14], '
+ u'successfully created', u'id': u'14'}
+ host_create_mock.return_value = host
+ get_existing_host_mock.return_value = []
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_host_failed_since_missed_required_param(
+ self, svc_authorize_mock, get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ })
+ get_existing_host_mock.return_value = []
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleFailJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_host_but_host_not_existed(self, svc_authorize_mock,
+ get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ })
+ get_existing_host_mock.return_value = []
+ host_deleted = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_deleted.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_host_successfully(self, svc_authorize_mock,
+ host_delete_mock,
+ get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ })
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "scsi", "owner_id": "",
+ "owner_name": ""}]
+ get_existing_host_mock.return_value = host_ret
+ host_deleted = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_deleted.apply()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_fcwwpn_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcwwpn_update_when_existing_absent(self, svc_authorize_mock, get_existing_host_mock, host_fcwwpn_update_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'fcwwpn': '1000001AA0570262',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ lshost_data = {'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': '', 'host_cluster_id': '', 'host_cluster_name': '',
+ 'protocol': 'scsi', 'nodes': [{'WWPN': '1000001AA0570260', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570261', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570262', 'node_logged_in_count': '0', 'state': 'online'}]}
+ get_existing_host_mock.return_value = lshost_data
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_fcwwpn_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcwwpn_update_when_new_added(self, svc_authorize_mock, get_existing_host_mock, host_fcwwpn_update_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'fcwwpn': '1000001AA0570260:1000001AA0570261:1000001AA0570262:1000001AA0570264',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ lshost_data = {'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': '', 'host_cluster_id': '', 'host_cluster_name': '',
+ 'protocol': 'scsi', 'nodes': [{'WWPN': '1000001AA0570260', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570261', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570262', 'node_logged_in_count': '0', 'state': 'online'}]}
+ get_existing_host_mock.return_value = lshost_data
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_fcwwpn_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcwwpn_update_when_existing_removes_and_new_added(self, svc_authorize_mock, get_existing_host_mock, host_fcwwpn_update_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'fcwwpn': '1000001AA0570264:1000001AA0570265:1000001AA0570266',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ lshost_data = {'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': '', 'host_cluster_id': '', 'host_cluster_name': '',
+ 'protocol': 'scsi', 'nodes': [{'WWPN': '1000001AA0570260', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570261', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570262', 'node_logged_in_count': '0', 'state': 'online'}]}
+ get_existing_host_mock.return_value = lshost_data
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_fcwwpn_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'fcwwpn': '1000001AA0570264:1000001AA0570265:1000001AA0570266',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ obj = IBMSVChost()
+ obj.existing_fcwwpn = ['1000001AA0570262', '1000001AA0570263', '1000001AA0570264']
+ obj.input_fcwwpn = ['1000001AA0570264', '1000001AA0570265', '1000001AA0570266']
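+ # host_fcwwpn_update() is expected to remove the WWPNs present only in
+ # existing_fcwwpn (...0262, ...0263), keep the overlap (...0264), add the
+ # new ones (...0265, ...0266), and return None on success.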
+ self.assertEqual(obj.host_fcwwpn_update(), None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_site_update(self, svc_authorize_mock, svc_obj_info_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'fcwwpn': '1000001AA0570260:1000001AA0570261:1000001AA0570262',
+ 'protocol': 'scsi',
+ 'type': 'generic',
+ 'site': 'site1'
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': 'site2', 'host_cluster_id': '', 'host_cluster_name': '',
+ 'protocol': 'scsi', 'nodes': [
+ {'WWPN': '1000001AA0570260', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570261', 'node_logged_in_count': '0', 'state': 'online'},
+ {'WWPN': '1000001AA0570262', 'node_logged_in_count': '0', 'state': 'online'}
+ ]
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVChost()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_hostcluster_update(self, svc_authorize_mock, svc_obj_info_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'protocol': 'scsi',
+ 'type': 'generic',
+ 'site': 'site1',
+ 'hostcluster': 'hostcluster0'
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': 'site2', 'host_cluster_id': '1', 'host_cluster_name': 'hostcluster0'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVChost()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_duplicate_checker(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'fcwwpn': '1000001AA0570260:1000001AA0570260:1000001AA0570260',
+ 'protocol': 'scsi',
+ 'type': 'generic',
+ 'site': 'site1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVChost()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_rename(self, mock_auth, mock_old, mock_cmd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ })
+ mock_old.return_value = [
+ {
+ "id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "scsi", "owner_id": "",
+ "owner_name": ""
+ }
+ ]
+ arg_data = []
+ mock_cmd.return_value = None
+ v = IBMSVChost()
+ data = v.host_rename(arg_data)
+ self.assertEqual(data, 'Host [name] has been successfully rename to [new_name].')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_rename_failure_for_unsupported_param(self, am):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ 'fcwwpn': True
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVChost()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_iscsiname_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_iscsiname_update_when_existing_absent(self, svc_authorize_mock, get_existing_host_mock, host_iscsiname_update_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'iscsiname': 'iqn.1994-05.com.redhat:2e358e438b8a',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ lshost_data = {'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': '', 'host_cluster_id': '', 'host_cluster_name': '',
+ 'protocol': 'scsi', 'nodes': [{'iscsi_name': 'iqn.1994-05.com.redhat:2e358e438b8a', 'node_logged_in_count': '0', 'state': 'offline'},
+ {'iscsi_name': 'iqn.localhost.hostid.7f000001', 'node_logged_in_count': '0', 'state': 'offline'},
+ {'iscsi_name': 'iqn.localhost.hostid.7f000002', 'node_logged_in_count': '0', 'state': 'offline'}]}
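+ # Only one of the three configured IQNs is requested, so apply() should report a change.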
+ get_existing_host_mock.return_value = lshost_data
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_iscsiname_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_iscsiname_update_when_new_added(self, svc_authorize_mock, get_existing_host_mock, host_iscsiname_update_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'iscsiname': 'iqn.1994-05.com.redhat:2e358e438b8a,iqn.localhost.hostid.7f000001,iqn.localhost.hostid.7f000002',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ lshost_data = {'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': '', 'host_cluster_id': '', 'host_cluster_name': '',
+ 'protocol': 'scsi', 'nodes': [{'iscsi_name': 'iqn.1994-05.com.redhat:2e358e438b8a', 'node_logged_in_count': '0', 'state': 'offline'},
+ {'iscsi_name': 'iqn.localhost.hostid.7f000001', 'node_logged_in_count': '0', 'state': 'offline'}]}
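+ # The request adds a third IQN missing from the existing host, so apply() should report a change.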
+ get_existing_host_mock.return_value = lshost_data
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_iscsiname_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'iscsiname': 'iqn.1994-05.com.redhat:2e358e438b8a,iqn.localhost.hostid.7f000002',
+ 'protocol': 'scsi',
+ 'type': 'generic'
+ })
+ obj = IBMSVChost()
+ obj.existing_iscsiname = ['iqn.1994-05.com.redhat:2e358e438b8a', 'iqn.localhost.hostid.7f000001']
+ obj.input_iscsiname = ['iqn.1994-05.com.redhat:2e358e438b8a', 'iqn.localhost.hostid.7f000002']
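+ # One IQN is removed and another added relative to the existing set; host_iscsiname_update() returns None on success.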
+ self.assertEqual(obj.host_iscsiname_update(), None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rdmanvme_nqn_update_when_new_added(self, svc_authorize_mock, host_create_mock, get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'nqn': 'nqn.2014-08.com.example:nvme:nvm-example-sn-d78434,nqn.2014-08.com.example:nvme:nvm-example-sn-d78433',
+ 'protocol': 'rdmanvme',
+ 'portset': 'portset0',
+ 'type': 'generic'
+ })
+
+ host = {u'message': u'Host, id [14], '
+ u'successfully created', u'id': u'14'}
+ host_create_mock.return_value = host
+ get_existing_host_mock.return_value = []
+ host_created = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_nqn_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'nqn': 'nqn.2014-08.com.example:nvme:nvm-example-sn-d78434,nqn.2014-08.com.example:nvme:nvm-example-sn-d78431',
+ 'protocol': 'rdmanvme',
+ 'type': 'generic'
+ })
+ obj = IBMSVChost()
+ obj.existing_nqn = ['nqn.2014-08.com.example:nvme:nvm-example-sn-d78434', 'nqn.2014-08.com.example:nvme:nvm-example-sn-d78433']
+ obj.input_nqn = ['nqn.2014-08.com.example:nvme:nvm-example-sn-d78434', 'nqn.2014-08.com.example:nvme:nvm-example-sn-d78431']
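+ # One NQN differs between the existing and requested sets; host_nqn_update() returns None after issuing the mocked commands.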
+ self.assertEqual(obj.host_nqn_update(), None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_storage_partition_update(self, svc_authorize_mock, svc_obj_info_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test',
+ 'state': 'present',
+ 'partition': 'partition1'
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '24', 'name': 'test', 'port_count': '5', 'type': 'generic',
+ 'mask': '1111111', 'iogrp_count': '4', 'status': 'offline',
+ 'site_id': '', 'site_name': 'site2', 'partition_name': ''
+ }
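+ # The existing host has no partition assigned, so requesting partition1 should trigger an update.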
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVChost()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.get_existing_host')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_host.IBMSVChost.host_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_tcpnvmehost_successfully(self, svc_authorize_mock,
+ host_create_mock,
+ get_existing_host_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_host',
+ 'protocol': 'tcpnvme',
+ 'nqn': 'nqn.2014-08.org.nvmexpress:NVMf:uuid:644f51bf-8432-4f59-bb13-5ada20c06397'
+ })
+ host = {u'message': u'Host, id [0], '
+ u'successfully created', u'id': u'0'}
+ host_create_mock.return_value = host
+ get_existing_host_mock.return_value = []
+ tcpnvme_host_obj = IBMSVChost()
+ with pytest.raises(AnsibleExitJson) as exc:
+ tcpnvme_host_obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_hostcluster.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_hostcluster.py
new file mode 100644
index 000000000..e2d5923c4
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_hostcluster.py
@@ -0,0 +1,343 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_hostcluster """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_hostcluster import IBMSVChostcluster
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVChostcluster(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVChostcluster()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_hostcluster(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_hostcluster',
+ })
+ hostcluster_ret = [{"id": "1", "name": "ansible_hostcluster", "port_count": "1",
+ "mapping_count": "4", "status": "offline", "host_count": "1",
+ "protocol": "nvme", "owner_id": "",
+ "owner_name": ""}]
+ svc_obj_info_mock.return_value = hostcluster_ret
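+ # get_existing_hostcluster() is expected to return the single lshostcluster entry as a dict.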
+ host = IBMSVChostcluster().get_existing_hostcluster()
+ self.assertEqual('ansible_hostcluster', host['name'])
+ self.assertEqual('1', host['id'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.get_existing_hostcluster')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_hostcluster_create_get_existing_hostcluster_called(self, svc_authorize_mock,
+ get_existing_hostcluster_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_host',
+ })
+ hostcluster_created = IBMSVChostcluster()
+ with pytest.raises(AnsibleExitJson) as exc:
+ hostcluster_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_hostcluster_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.get_existing_hostcluster')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hostcluster_but_hostcluster_exist(self, svc_authorize_mock,
+ get_existing_hostcluster_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'hostcluster0',
+ })
+ hostcluster_ret = {
+ "id": "0",
+ "name": "hostcluster0",
+ "status": "online",
+ "host_count": "1",
+ "mapping_count": "0",
+ "port_count": "1",
+ "protocol": "scsi",
+ "owner_id": "0",
+ "owner_name": "group5"
+ }
+ get_existing_hostcluster_mock.return_value = hostcluster_ret
+ hostcluster_created = IBMSVChostcluster()
+ with pytest.raises(AnsibleExitJson) as exc:
+ hostcluster_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_hostcluster_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.get_existing_hostcluster')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.hostcluster_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hostcluster_successfully(self, svc_authorize_mock,
+ hostcluster_create_mock,
+ get_existing_hostcluster_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_hostcluster'
+ })
+ host = {u'message': u'Host cluster, id [14], '
+ u'successfully created', u'id': u'14'}
+ hostcluster_create_mock.return_value = host
+ get_existing_hostcluster_mock.return_value = []
+ hostcluster_created = IBMSVChostcluster()
+ with pytest.raises(AnsibleExitJson) as exc:
+ hostcluster_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_hostcluster_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.get_existing_hostcluster')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hostcluster_failed_since_missed_required_param(
+ self, svc_authorize_mock, get_existing_hostcluster_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_hostcluster'
+ })
+ get_existing_hostcluster_mock.return_value = []
+ hostcluster_created = IBMSVChostcluster()
+ with pytest.raises(AnsibleFailJson) as exc:
+ hostcluster_created.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+ get_existing_hostcluster_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.get_existing_hostcluster')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_hostcluster_but_hostcluster_not_exist(self, svc_authorize_mock,
+ get_existing_hostcluster_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_hostcluster',
+ })
+ get_existing_hostcluster_mock.return_value = []
+ hostcluster_deleted = IBMSVChostcluster()
+ with pytest.raises(AnsibleExitJson) as exc:
+ hostcluster_deleted.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_hostcluster_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.get_existing_hostcluster')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_hostcluster.IBMSVChostcluster.hostcluster_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_hostcluster_successfully(self, svc_authorize_mock,
+ hostcluster_delete_mock,
+ get_existing_hostcluster_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_hostcluster',
+ })
+ hostcluster_ret = [{"id": "1", "name": "ansible_hostcluster", "port_count": "1",
+ "mapping_count": "4", "status": "offline", "host_count": "1",
+ "protocol": "nvme", "owner_id": "",
+ "owner_name": ""}]
+ get_existing_hostcluster_mock.return_value = hostcluster_ret
+ hostcluster_deleted = IBMSVChostcluster()
+ with pytest.raises(AnsibleExitJson) as exc:
+ hostcluster_deleted.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_hostcluster_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_hostcluster_update(self, auth, cmd1):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_hostcluster',
+ 'ownershipgroup': 'new'
+ })
+ modify = [
+ 'ownershipgroup'
+ ]
+ cmd1.return_value = None
+ h = IBMSVChostcluster()
+ h.hostcluster_update(modify)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_to_validate_update(self, auth, cmd1, cmd2):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'hostcluster0',
+ 'ownershipgroup': 'group1'
+ })
+ cmd1.return_value = {
+ "id": "0",
+ "name": "hostcluster0",
+ "status": "online",
+ "host_count": "1",
+ "mapping_count": "0",
+ "port_count": "1",
+ "protocol": "scsi",
+ "owner_id": "0",
+ "owner_name": "group5"
+ }
+ cmd2.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ h = IBMSVChostcluster()
+ h.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_to_validate_noownershipgroup(self, auth, cmd1, cmd2):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'hostcluster0',
+ 'noownershipgroup': True
+ })
+ cmd1.return_value = {
+ "id": "0",
+ "name": "hostcluster0",
+ "status": "online",
+ "host_count": "1",
+ "mapping_count": "0",
+ "port_count": "1",
+ "protocol": "scsi",
+ "owner_id": "0",
+ "owner_name": "group5"
+ }
+ cmd2.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ h = IBMSVChostcluster()
+ h.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_info.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_info.py
new file mode 100644
index 000000000..b74e52c8b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_info.py
@@ -0,0 +1,304 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+# Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_info """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_info import IBMSVCGatherInfo
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCGatherInfo(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCGatherInfo()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_info.IBMSVCGatherInfo.get_list')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_host_list_called(self, mock_svc_authorize,
+ get_list_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'gather_subset': 'host',
+ })
+ with pytest.raises(AnsibleExitJson) as exc:
+ IBMSVCGatherInfo().apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_list_mock.assert_called_with('host', 'Host', 'lshost', False)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_the_host_result_by_gather_info(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'gather_subset': 'host',
+ })
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "nvme", "owner_id": "",
+ "owner_name": ""}]
+ svc_obj_info_mock.return_value = host_ret
+ with pytest.raises(AnsibleExitJson) as exc:
+ IBMSVCGatherInfo().apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertDictEqual(exc.value.args[0]['Host'][0], host_ret[0])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_the_host_and_vol_result_by_gather_info(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'gather_subset': 'host,vol',
+ })
+ host_ret = [{"id": "1", "name": "ansible_host", "port_count": "1",
+ "iogrp_count": "4", "status": "offline",
+ "site_id": "", "site_name": "",
+ "host_cluster_id": "", "host_cluster_name": "",
+ "protocol": "nvme", "owner_id": "",
+ "owner_name": ""}]
+ vol_ret = [{"id": "0", "name": "volume_Ansible_collections",
+ "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "0",
+ "mdisk_grp_name": "Pool_Ansible_collections",
+ "capacity": "4.00GB", "type": "striped", "FC_id": "",
+ "FC_name": "", "RC_id": "", "RC_name": "",
+ "vdisk_UID": "6005076810CA0166C00000000000019F",
+ "fc_map_count": "0", "copy_count": "1",
+ "fast_write_state": "empty", "se_copy_count": "0",
+ "RC_change": "no", "compressed_copy_count": "0",
+ "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "Pool_Ansible_collections",
+ "owner_id": "", "owner_name": "", "formatting": "no",
+ "encrypt": "no", "volume_id": "0",
+ "volume_name": "volume_Ansible_collections",
+ "function": "", "protocol": "scsi"}]
+ svc_obj_info_mock.side_effect = [host_ret, vol_ret]
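+ # side_effect supplies host_ret to the first query and vol_ret to the second, matching the order of the requested subsets.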
+ with pytest.raises(AnsibleExitJson) as exc:
+ IBMSVCGatherInfo().apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertDictEqual(exc.value.args[0]['Host'][0], host_ret[0])
+ self.assertDictEqual(exc.value.args[0]['Volume'][0], vol_ret[0])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vol_volgrp_population_and_volgrpsnapshot_result_by_gather_info(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ """Test ibm_svc_info module for lsvolumepopulation, lsvolumegrouppopulation and lsvolumegroupsnapshot"""
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'gather_subset': 'volumepopulation,volumegrouppopulation,volumegroupsnapshot',
+ })
+
+ vol_population_ret = [{
+ "data_to_move": "",
+ "estimated_completion_time": "",
+ "rate": "",
+ "source_snapshot": "ssthin0",
+ "source_volume_id": "2",
+ "source_volume_name": "del_volume21697369620971",
+ "start_time": "231127004946",
+ "type": "thinclone",
+ "volume_group_id": "2",
+ "volume_group_name": "th_volgrp0",
+ "volume_id": "11",
+ "volume_name": "del_volume21697369620971-5"
+ }, {
+ "data_to_move": "",
+ "estimated_completion_time": "",
+ "rate": "",
+ "source_snapshot": "ssthin0",
+ "source_volume_id": "0",
+ "source_volume_name": "volume0",
+ "start_time": "231127004946",
+ "type": "thinclone",
+ "volume_group_id": "2",
+ "volume_group_name": "th_volgrp0",
+ "volume_id": "3",
+ "volume_name": "volume0-5"
+ }]
+
+ volgrp_population_ret = [{
+ "data_to_move": "",
+ "estimated_completion_time": "",
+ "id": "2",
+ "parent_uid": "0",
+ "rate": "",
+ "restore_estimated_completion_time": "",
+ "restore_snapshot_name": "",
+ "restore_start_time": "",
+ "source_snapshot": "ssthin0",
+ "source_volume_group_id": "0",
+ "source_volume_group_name": "volumegroup0",
+ "start_time": "231127004946",
+ "volume_group_name": "th_volgrp0",
+ "volume_group_type": "thinclone"
+ }]
+
+ volgrp_snapshot_ret = [{
+ "auto_snapshot": "no",
+ "expiration_time": "",
+ "id": "1",
+ "matches_group": "yes",
+ "name": "ssthin0",
+ "operation_completion_estimate": "",
+ "operation_start_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "parent_uid": "0",
+ "protection_provisioned_capacity": "33.00GB",
+ "protection_written_capacity": "2.25MB",
+ "safeguarded": "no",
+ "state": "active",
+ "time": "231127004706",
+ "volume_group_id": "0",
+ "volume_group_name": "volumegroup0"
+ }]
+
+ svc_obj_info_mock.side_effect = [vol_population_ret, volgrp_population_ret, volgrp_snapshot_ret]
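+ # Three mocked replies, one per requested subset, consumed in order.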
+ with pytest.raises(AnsibleExitJson) as exc:
+ IBMSVCGatherInfo().apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertDictEqual(exc.value.args[0]['VolumePopulation'][0], vol_population_ret[0])
+ self.assertDictEqual(exc.value.args[0]['VolumePopulation'][1], vol_population_ret[1])
+ self.assertDictEqual(exc.value.args[0]['VolumeGroupPopulation'][0], volgrp_population_ret[0])
+ self.assertDictEqual(exc.value.args[0]['VolumeGroupSnapshot'][0], volgrp_snapshot_ret[0])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_the_volumehostmap_result_by_gather_info(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'gather_subset': 'vdiskhostmap',
+ 'objectname': 'volume_Ansible_collections'
+ })
+ vol_ret = [{"id": "0", "name": "volume_Ansible_collections",
+ "SCSI_id": "0", "host_id": "0", "host_name": "ansible_host",
+ "IO_group_id": "0", "IO_group_name": "io_grp0", "vdisk_UID": "600507681295018A3000000000000000",
+ "mapping_type": "private", "host_cluster_id": "",
+ "host_cluster_name": "", "protocol": "scsi"
+ }]
+
+ svc_obj_info_mock.return_value = vol_ret
+ with pytest.raises(AnsibleExitJson) as exc:
+ IBMSVCGatherInfo().apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertDictEqual(exc.value.args[0]['VdiskHostMap'][0], vol_ret[0])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_the_volumehostmap_result_without_objectname_by_gather_info(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'gather_subset': 'vdiskhostmap',
+ })
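+ # 'vdiskhostmap' requires 'objectname'; omitting it should make the module fail.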
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCGatherInfo().apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_initial_setup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_initial_setup.py
new file mode 100644
index 000000000..2803a74ca
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_initial_setup.py
@@ -0,0 +1,639 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_initial_setup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_initial_setup import IBMSVCInitialSetup
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCInitialSetup(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.license_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ def test_module_with_no_input_params(self,
+ run_cmd_mock,
+ system_info_mock,
+ license_probe_mock,
+ dns_info_mock,
+ auth_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ })
+
+ license_probe_mock.return_value = []
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_fail_with_mutually_exclusive_param(self, auth_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'time': '101009142021',
+ 'ntpip': '9.9.9.9'
+ })
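+ # 'time' and 'ntpip' are mutually exclusive parameters, so apply() is expected to fail.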
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleFailJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_dns_validation_1(self, auth_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'dnsip': ['9.9.9.9']
+ })
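+ # dnsip supplied without dnsname should fail DNS parameter validation.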
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleFailJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_dns_validation_2(self, auth_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'dnsip': ['9.9.9.9'],
+ 'dnsname': []
+ })
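+ # An empty dnsname list does not pair with the supplied dnsip, so validation should fail.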
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleFailJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.license_probe')
+ def test_module_system_and_dns(self,
+ license_probe_mock,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'system_name': 'cluster_test_0',
+ 'time': '101009142021',
+ 'timezone': 200,
+ 'dnsname': ['test_dns'],
+ 'dnsip': ['1.1.1.1']
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "200 Asia/Calcutta",
+ "cluster_ntp_IP_address": "",
+ }
+
+ license_probe_mock.return_value = []
+
+ dns_info_mock.return_value = [
+ {
+ "id": "0",
+ "name": "h",
+ "type": "ipv4",
+ "IP_address": "9.20.136.11",
+ "status": "active"
+ },
+ {
+ "id": "1",
+ "name": "i",
+ "type": "ipv4",
+ "IP_address": "9.20.136.25",
+ "status": "active"
+ }
+ ]
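+ # The system name is unset and the configured DNS servers differ from the request, so apply() should make changes.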
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.license_probe')
+ def test_with_already_existed_system_and_dns(
+ self,
+ license_probe_mock,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'system_name': 'cluster_test_0',
+ 'ntpip': '9.9.9.9',
+ 'timezone': 200,
+ 'dnsname': ['test_dns'],
+ 'dnsip': ['1.1.1.1']
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "cluster_test_0",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "200 Asia/Calcutta",
+ "cluster_ntp_IP_address": "9.9.9.9",
+ }
+
+ license_probe_mock.return_value = []
+
+ dns_info_mock.return_value = [
+ {
+ "id": "0",
+ "name": "test_dns",
+ "type": "ipv4",
+ "IP_address": "1.1.1.1",
+ "status": "active"
+ }
+ ]
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.license_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_license_key_update(self,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ license_probe_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'license_key': ['0123-4567-89AB-CDEF']
+ })
+
+ license_probe_mock.return_value = []
+
+ run_cmd_mock.return_value = [
+ {
+ "id": "0",
+ "name": "encryption",
+ "state": "inactive",
+ "license_key": "",
+ "trial_expiration_date": "",
+ "serial_num": "",
+ "mtm": ""
+ }
+ ]
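+ # No license key is currently installed, so applying the requested key should report a change.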
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.license_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_with_existing_license_key_update(self,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ license_probe_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'license_key': ['0123-4567-89AB-CDEF']
+ })
+
+ license_probe_mock.return_value = []
+
+ run_cmd_mock.return_value = [
+ {
+ "id": "0",
+ "name": "encryption",
+ "state": "inactive",
+ "license_key": "0123-4567-89AB-CDEF",
+ "trial_expiration_date": "",
+ "serial_num": "",
+ "mtm": ""
+ }
+ ]
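+ # The requested key is already installed, so apply() should be idempotent (changed=False).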
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.license_probe')
+ def test_module_empty_timezone(self,
+ license_probe_mock,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'system_name': 'cluster_test_0',
+ 'time': '101009142021',
+ 'timezone': 200,
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "",
+ "cluster_ntp_IP_address": "",
+ }
+
+ license_probe_mock.return_value = []
+
+ dns_info_mock.return_value = []
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_license_update_storwize(self,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote': 5,
+ 'virtualization': 1,
+ 'flash': 1,
+ 'compression': 4,
+ 'cloud': 1,
+ 'easytier': 1,
+ 'physical_flash': True,
+ 'encryption': True
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "cluster_test_0",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "200 Asia/Calcutta",
+ "cluster_ntp_IP_address": "9.9.9.9",
+ "product_name": "IBM Storwize V7000"
+ }
+
+ run_cmd_mock.return_value = {
+ "license_flash": "0",
+ "license_remote": "4",
+ "license_virtualization": "0",
+ "license_physical_disks": "0",
+ "license_physical_flash": "off",
+ "license_physical_remote": "off",
+ "license_compression_capacity": "4",
+ "license_compression_enclosures": "5",
+ "license_easy_tier": "0",
+ "license_cloud_enclosures": "0"
+ }
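+ # Several requested license values differ from the mocked current settings, so an update is expected.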
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_existing_license_storwize(self,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote': 5,
+ 'virtualization': 1,
+ 'flash': 0,
+ 'compression': 4,
+ 'cloud': 0,
+ 'easytier': 0,
+ 'physical_flash': "off",
+ 'encryption': True
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "cluster_test_0",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "200 Asia/Calcutta",
+ "cluster_ntp_IP_address": "9.9.9.9",
+ "product_name": "IBM Storwize V7000"
+ }
+
+ run_cmd_mock.return_value = {
+ "license_flash": "0",
+ "license_remote": "5",
+ "license_virtualization": "1",
+ "license_physical_disks": "0",
+ "license_physical_flash": "off",
+ "license_physical_remote": "off",
+ "license_compression_capacity": "0",
+ "license_compression_enclosures": "4",
+ "license_easy_tier": "0",
+ "license_cloud_enclosures": "0"
+ }
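+ # The current settings already match the requested values, so no change is expected.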
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_license_update_with_SVC(self,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote': 5,
+ 'virtualization': 1,
+ 'flash': 1,
+ 'compression': 4,
+ 'cloud': 1,
+ 'easytier': 1,
+ 'physical_flash': True,
+ 'encryption': True
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "cluster_test_0",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "200 Asia/Calcutta",
+ "cluster_ntp_IP_address": "9.9.9.9",
+ "product_name": "SVC"
+ }
+
+ run_cmd_mock.return_value = {
+ "license_flash": "0",
+ "license_remote": "4",
+ "license_virtualization": "0",
+ "license_physical_disks": "0",
+ "license_physical_flash": "off",
+ "license_physical_remote": "off",
+ "license_compression_capacity": "0",
+ "license_compression_enclosures": "4",
+ "license_easy_tier": "0",
+ "license_cloud_enclosures": "0"
+ }
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_existing_dnsservers')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_initial_setup.IBMSVCInitialSetup.get_system_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_license_update_existing_SVC(self,
+ auth_mock,
+ system_info_mock,
+ run_cmd_mock,
+ dns_info_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'remote': 5,
+ 'virtualization': 1,
+ 'flash': 1,
+ 'compression': 4,
+ 'cloud': 1,
+ 'easytier': 1,
+ 'physical_flash': "on",
+ 'encryption': True
+ })
+
+ system_info_mock.return_value = {
+ "id": "0000010023806192",
+ "name": "cluster_test_0",
+ "location": "local",
+ "cluster_locale": "en_US",
+ "time_zone": "200 Asia/Calcutta",
+ "cluster_ntp_IP_address": "9.9.9.9",
+ "product_name": "SVC"
+ }
+
+ run_cmd_mock.return_value = {
+ "license_flash": "1",
+ "license_remote": "5",
+ "license_virtualization": "1",
+ "license_physical_disks": "0",
+ "license_physical_flash": "on",
+ "license_physical_remote": "off",
+ "license_compression_capacity": "4",
+ "license_compression_enclosures": "5",
+ "license_easy_tier": "1",
+ "license_cloud_enclosures": "1"
+ }
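+ # All requested values match the current SVC licensing, so apply() should report no change.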
+
+ svc_is = IBMSVCInitialSetup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ svc_is.apply()
+
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py
new file mode 100644
index 000000000..4ef0e602c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_callhome.py
@@ -0,0 +1,847 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_callhome """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_callhome import IBMSVCCallhome
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCCallhome(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'enabled'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCCallhome()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
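+ # With a complete email call home configuration, basic_checks() should pass silently and return None.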
+ ch = IBMSVCCallhome()
+ data = ch.basic_checks()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_system_data(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_soi.return_value = {
+ "id": "0000010023806192",
+ "name": "Cluster_9.71.42.198",
+ "location": "local",
+ "partnership": "",
+ "total_mdisk_capacity": "3.6TB",
+ "space_in_mdisk_grps": "3.6TB",
+ "space_allocated_to_vdisks": "449.70GB",
+ "total_free_space": "3.2TB",
+ "total_vdiskcopy_capacity": "993.00GB",
+ "total_used_capacity": "435.67GB",
+ "total_overallocation": "26",
+ "total_vdisk_capacity": "993.00GB",
+ "total_allocated_extent_capacity": "455.00GB",
+ "statistics_status": "on",
+ "statistics_frequency": "15",
+ "cluster_locale": "en_US",
+ "time_zone": "503 SystemV/PST8",
+ "code_level": "8.4.2.0 (build 154.20.2109031944000)",
+ "console_IP": "9.71.42.198:443",
+ "id_alias": "0000010023806192",
+ "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0",
+ "gm_intra_cluster_delay_simulation": "0",
+ "gm_max_host_delay": "5",
+ "email_reply": "sreshtant.bohidar@ibm.com",
+ "email_contact": "Sreshtant Bohidar",
+ "email_contact_primary": "9439394132",
+ "email_contact_alternate": "9439394132",
+ "email_contact_location": "floor 2",
+ "email_contact2": "",
+ "email_contact2_primary": "",
+ "email_contact2_alternate": "",
+ "email_state": "stopped",
+ "inventory_mail_interval": "1",
+ "cluster_ntp_IP_address": "2.2.2.2",
+ "cluster_isns_IP_address": "",
+ "iscsi_auth_method": "none",
+ "iscsi_chap_secret": "",
+ "auth_service_configured": "no",
+ "auth_service_enabled": "no",
+ "auth_service_url": "",
+ "auth_service_user_name": "",
+ "auth_service_pwd_set": "no",
+ "auth_service_cert_set": "no",
+ "auth_service_type": "ldap",
+ "relationship_bandwidth_limit": "25",
+ "tiers": [
+ {
+ "tier": "tier_scm",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ },
+ {
+ "tier": "tier0_flash",
+ "tier_capacity": "1.78TB",
+ "tier_free_capacity": "1.47TB"
+ },
+ {
+ "tier": "tier1_flash",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ },
+ {
+ "tier": "tier_enterprise",
+ "tier_capacity": "0.00MB",
+ "tier_free_capacity": "0.00MB"
+ },
+ {
+ "tier": "tier_nearline",
+ "tier_capacity": "1.82TB",
+ "tier_free_capacity": "1.68TB"
+ }
+ ],
+ "easy_tier_acceleration": "off",
+ "has_nas_key": "no",
+ "layer": "storage",
+ "rc_buffer_size": "256",
+ "compression_active": "no",
+ "compression_virtual_capacity": "0.00MB",
+ "compression_compressed_capacity": "0.00MB",
+ "compression_uncompressed_capacity": "0.00MB",
+ "cache_prefetch": "on",
+ "email_organization": "IBM",
+ "email_machine_address": "Street 39",
+ "email_machine_city": "New York",
+ "email_machine_state": "CAN",
+ "email_machine_zip": "123456",
+ "email_machine_country": "US",
+ "total_drive_raw_capacity": "10.10TB",
+ "compression_destage_mode": "off",
+ "local_fc_port_mask": "1111111111111111111111111111111111111111111111111111111111111111",
+ "partner_fc_port_mask": "1111111111111111111111111111111111111111111111111111111111111111",
+ "high_temp_mode": "off",
+ "topology": "hyperswap",
+ "topology_status": "dual_site",
+ "rc_auth_method": "none",
+ "vdisk_protection_time": "15",
+ "vdisk_protection_enabled": "no",
+ "product_name": "IBM Storwize V7000",
+ "odx": "off",
+ "max_replication_delay": "0",
+ "partnership_exclusion_threshold": "315",
+ "gen1_compatibility_mode_enabled": "no",
+ "ibm_customer": "262727272",
+ "ibm_component": "",
+ "ibm_country": "383",
+ "tier_scm_compressed_data_used": "0.00MB",
+ "tier0_flash_compressed_data_used": "0.00MB",
+ "tier1_flash_compressed_data_used": "0.00MB",
+ "tier_enterprise_compressed_data_used": "0.00MB",
+ "tier_nearline_compressed_data_used": "0.00MB",
+ "total_reclaimable_capacity": "380.13MB",
+ "physical_capacity": "3.60TB",
+ "physical_free_capacity": "3.15TB",
+ "used_capacity_before_reduction": "361.81MB",
+ "used_capacity_after_reduction": "14.27GB",
+ "overhead_capacity": "34.00GB",
+ "deduplication_capacity_saving": "0.00MB",
+ "enhanced_callhome": "on",
+ "censor_callhome": "on",
+ "host_unmap": "off",
+ "backend_unmap": "on",
+ "quorum_mode": "standard",
+ "quorum_site_id": "",
+ "quorum_site_name": "",
+ "quorum_lease": "short",
+ "automatic_vdisk_analysis_enabled": "on",
+ "callhome_accepted_usage": "no",
+ "safeguarded_copy_suspended": "no",
+ "serverIP": "9.20.118.16",
+ "serverPort": 25
+ }
+ ch = IBMSVCCallhome()
+ data = ch.get_system_data()
+ self.assertEqual(data['callhome_accepted_usage'], 'no')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_email_user_data(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
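+ # Two email users are mocked; the lookup should return the entry whose
+ # address matches contact_email ('test@domain.com').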
+ mock_soi.return_value = [
+ {
+ "id": "0",
+ "name": "emailuser0",
+ "address": "callhome1@de.ibm.com",
+ "user_type": "support",
+ "error": "on",
+ "warning": "off",
+ "info": "off",
+ "inventory": "on"
+ },
+ {
+ "id": "1",
+ "name": "emailuser1",
+ "address": "test@domain.com",
+ "user_type": "local",
+ "error": "off",
+ "warning": "off",
+ "info": "off",
+ "inventory": "off"
+ }
+ ]
+ ch = IBMSVCCallhome()
+ data = ch.get_existing_email_user_data()
+ self.assertEqual(data['address'], 'test@domain.com')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_check_email_server_exists(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_soi.return_value = [
+ {
+ "id": "0",
+ "name": "emailserver0",
+ "IP_address": "9.20.118.16",
+ "port": "25",
+ "status": "active"
+ }
+ ]
+ ch = IBMSVCCallhome()
+ data = ch.check_email_server_exists()
+ self.assertEqual(data, True)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_check_email_user_exists(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_soi.return_value = [
+ {
+ "id": "0",
+ "name": "emailuser0",
+ "address": "test@domain.com",
+ "user_type": "support",
+ "error": "on",
+ "warning": "off",
+ "info": "off",
+ "inventory": "off"
+ }
+ ]
+ ch = IBMSVCCallhome()
+ data = ch.check_email_user_exists()
+ self.assertEqual(data['id'], '0')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_email_server(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_src.return_value = {
+ 'id': '0',
+ 'message': 'Email Server id [0] successfully created'
+ }
+ ch = IBMSVCCallhome()
+ data = ch.create_email_server()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_email_user(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_src.return_value = {
+ 'id': '0',
+ 'message': 'User, id [0], successfully created'
+ }
+ ch = IBMSVCCallhome()
+ data = ch.create_email_user()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_email_callhome(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.enable_email_callhome()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_disable_email_callhome(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'disabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.disable_email_callhome()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_email_data(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'disabled',
+ 'callhome_type': 'email',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.update_email_data()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_proxy(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'disabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
+ mock_soi.return_value = {
+ "enabled": "yes",
+ "url": "http://h-proxy3.ssd.hursley.ibm.com",
+ "port": "3128",
+ "username": "",
+ "password_set": "no",
+ "certificate": "0 fields"
+ }
+ ch = IBMSVCCallhome()
+ data = ch.get_existing_proxy()
+ self.assertEqual(data['port'], '3128')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_proxy(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'no_proxy'
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.remove_proxy()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_proxy(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.create_proxy()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_probe_proxy(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
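+ # The existing proxy reports port '3127' while the module asks for 3128,
+ # so probe_proxy() should flag 'port' for update.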
+ data = {
+ "enabled": "yes",
+ "url": "http://h-proxy3.ssd.hursley.ibm.com",
+ "port": "3127",
+ "username": "",
+ "password_set": "no",
+ "certificate": "0 fields"
+ }
+ ch = IBMSVCCallhome()
+ data = ch.probe_proxy(data)
+ self.assertEqual(data['port'], 3128)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_proxy(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
+ data = {
+ 'port': 3128
+ }
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.update_proxy(data)
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_cloud_callhome_data(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'disabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
+ mock_soi.return_value = {
+ "status": "disabled",
+ "connection": "",
+ "error_sequence_number": "",
+ "last_success": "",
+ "last_failure": ""
+ }
+ ch = IBMSVCCallhome()
+ data = ch.get_existing_cloud_callhome_data()
+ self.assertEqual(data['status'], 'disabled')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_cloud_callhome(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'enabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.enable_cloud_callhome()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_disable_cloud_callhome(self, mock_svc_authorize, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'disabled',
+ 'callhome_type': 'cloud services',
+ 'company_name': 'company_name',
+ 'address': 'address',
+ 'city': 'city',
+ 'province': 'PRV',
+ 'postalcode': '123456',
+ 'country': 'US',
+ 'location': 'location',
+ 'contact_name': 'contact_name',
+ 'contact_email': 'test@domain.com',
+ 'phonenumber_primary': '1234567890',
+ 'serverIP': '9.20.118.16',
+ 'serverPort': 25,
+ 'proxy_url': 'http://h-proxy3.ssd.hursley.ibm.com',
+ 'proxy_port': 3128,
+ 'proxy_type': 'open_proxy'
+ })
+ mock_src.return_value = ''
+ ch = IBMSVCCallhome()
+ data = ch.disable_cloud_callhome()
+ self.assertEqual(data, None)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py
new file mode 100644
index 000000000..360aca969
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_consistgrp_flashcopy.py
@@ -0,0 +1,401 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_consistgrp_flashcopy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_consistgrp_flashcopy import IBMSVCFlashcopyConsistgrp
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCFlashcopyConsistgrp(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCFlashcopyConsistgrp()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_fcconsistgrp(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
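+ # Mocked consistency-group details; the lookup should return the group
+ # matching 'test_name'.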
+ svc_obj_info_mock.return_value = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name"
+ }
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.get_existing_fcconsistgrp()
+ self.assertEqual("test_name", data["name"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcconsistgrp_create(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '4',
+ 'message': 'FlashCopy Consistency Group, id [4], successfully created'
+ }
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.fcconsistgrp_create()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcconsistgrp_delete(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.fcconsistgrp_delete()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcconsistgrp_probe(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
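+ # The existing group is owned by 'ownershipgroup_name_old', so the probe
+ # should report 'ownershipgroup' as a pending change.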
+ modify_arg = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name_old"
+ }
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.fcconsistgrp_probe(modify_arg)
+ self.assertIn('ownershipgroup', data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcconsistgrp_probe_noconsistgrp(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'noownershipgroup': True
+ })
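+ # noownershipgroup=True against a group that currently has an owner should
+ # surface 'noownershipgroup' in the probe result.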
+ modify_arg = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name"
+ }
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.fcconsistgrp_probe(modify_arg)
+ self.assertIn('noownershipgroup', data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcconsistgrp_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
+ modify_arg = {
+ 'ownershipgroup': 'ownershipgroup_name',
+ }
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.fcconsistgrp_update(modify_arg)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcconsistgrp_update_noconsistgrp(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
+ modify_arg = {
+ 'noownershipgroup': True,
+ }
+ obj = IBMSVCFlashcopyConsistgrp()
+ data = obj.fcconsistgrp_update(modify_arg)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.fcconsistgrp_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.get_existing_fcconsistgrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_nonexisting_fcconsistgrp(self, svc_authorize_mock, svc_run_command_mock, gef, fc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
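+ # No existing group is returned, so apply() should create one and report
+ # changed=True.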
+ gef.return_value = {}
+ fc.return_value = {
+ 'id': '4',
+ 'message': 'FlashCopy Consistency Group, id [4], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyConsistgrp()
+ obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.get_existing_fcconsistgrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_existing_fcconsistgrp(self, svc_authorize_mock, svc_run_command_mock, gef):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
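+ # The group already exists with the desired owner, so apply() is
+ # idempotent (changed=False).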
+ gef.return_value = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyConsistgrp()
+ obj.apply()
+
+ self.assertEqual(False, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.fcconsistgrp_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.get_existing_fcconsistgrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_updating_existing_fcconsistgrp(self, svc_authorize_mock, svc_run_command_mock, gef, fu):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
+ gef.return_value = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name_old"
+ }
+ fu.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyConsistgrp()
+ obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.fcconsistgrp_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.get_existing_fcconsistgrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deleting_existing_fcconsistgrp(self, svc_authorize_mock, svc_run_command_mock, gef, fd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ })
+ gef.return_value = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name"
+ }
+ fd.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyConsistgrp()
+ obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.fcconsistgrp_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.get_existing_fcconsistgrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deleting_existing_fcconsistgrp_with_force(self, svc_authorize_mock, svc_run_command_mock, gef, fd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ 'force': True
+ })
+ gef.return_value = {
+ "id": "3", "name": "test_name", "status": "empty",
+ "autodelete": "off", "start_time": "",
+ "owner_id": "", "owner_name": "ownershipgroup_name"
+ }
+ fd.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyConsistgrp()
+ obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.fcconsistgrp_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_consistgrp_flashcopy.IBMSVCFlashcopyConsistgrp.get_existing_fcconsistgrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deleting_nonexisting_fcconsistgrp(self, svc_authorize_mock, svc_run_command_mock, gef, fd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ })
+ gef.return_value = {}
+ fd.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyConsistgrp()
+ obj.apply()
+
+ self.assertEqual(False, exc.value.args[0]['changed'])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_cv.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_cv.py
new file mode 100644
index 000000000..16fe3bb36
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_cv.py
@@ -0,0 +1,808 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_replication """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_cv import IBMSVCchangevolume
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCchangevolume(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCchangevolume()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_rc(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
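+ # A metro-type relationship is mocked; get_existing_rc() should return
+ # its details keyed by the change-volume name.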
+ svc_obj_info_mock.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ obj = IBMSVCchangevolume()
+ return_data = obj.get_existing_rc()
+ self.assertEqual('test_cvname', return_data['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_vdisk(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
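+ # The vdisk query is mocked as a two-element list: volume-level
+ # attributes first, copy-level attributes second.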
+ svc_obj_info_mock.return_value = [
+ {
+ "id": "101", "name": "test_cvname", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "536870912", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "many", "FC_name": "many", "RC_id": "101",
+ "RC_name": "rcopy20", "vdisk_UID": "60050768108101C7C0000000000005D9", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "2",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "201123133855", "parent_mdisk_grp_id": "1",
+ "parent_mdisk_grp_name": "AnsibleMaster", "owner_type": "none", "owner_id": "", "owner_name": "",
+ "encrypt": "no", "volume_id": "101", "volume_name": "test_cvname", "function": "master", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "", "backup_status": "off",
+ "last_backup_time": "", "restore_status": "none", "backup_grain_size": "", "deduplicated_copy_count": "0",
+ "protocol": "scsi"
+ }, {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "1",
+ "mdisk_grp_name": "AnsibleMaster", "type": "striped", "mdisk_id": "", "mdisk_name": "",
+ "fast_write_state": "empty", "used_capacity": "536870912", "real_capacity": "536870912", "free_capacity": "0",
+ "overallocation": "100", "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "536870912"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ],
+ "compressed_copy": "no", "uncompressed_used_capacity": "536870912", "parent_mdisk_grp_id": "1",
+ "parent_mdisk_grp_name": "AnsibleMaster", "encrypt": "no", "deduplicated_copy": "no", "used_capacity_before_reduction": ""
+ }
+ ]
+ obj = IBMSVCchangevolume()
+ return_data = obj.get_existing_vdisk('test_cvname')
+ self.assertEqual('test_cvname', return_data['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_attach_ismaster_true(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
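+ # Neither side has a change volume attached yet (both *_change_vdisk_name
+ # fields are empty), so attach should proceed.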
+ arg_data = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "global", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_attach(arg_data)
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_attach_ismaster_false(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'false'
+ })
+ arg_data = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "global", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_attach(arg_data)
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_detach_ismaster_true(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
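+ # The master side already carries change volume 'mcvn'; with
+ # ismaster=true, detach should remove it.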
+ arg_data = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "mcvn", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_detach(arg_data)
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_detach_ismaster_false(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'false'
+ })
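+ # Here the aux side carries change volume 'acvn'; with ismaster=false,
+ # detach should remove it.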
+ arg_data = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "acvn", "previous_primary": "", "channel": "none"
+ }
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_detach(arg_data)
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_probe_with_no_rcreldata(self, svc_authorize_mock, get_existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
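+ # No relationship data is found, so the probe is expected to fail the module.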
+ get_existing_rc_mock.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.change_volume_probe()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_probe_with_rcreldata(self, svc_authorize_mock, get_existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
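+ # The relationship exists but no change volume is attached, so the probe
+ # should report that a change is needed.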
+ get_existing_rc_mock.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_probe()
+ self.assertTrue(return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_delete(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_delete()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_create_no_basevolume(self, svc_authorize_mock, get_existing_vdisk_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
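+ # With no basevolume given and no matching vdisk found, the create is
+ # expected to fail the module.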
+ get_existing_vdisk_mock.return_value = []
+ svc_run_command_mock.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.change_volume_create()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_create_with_vdiskdata(self, svc_authorize_mock, get_existing_vdisk_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
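+ # The base volume lookup succeeds; its attributes seed the mocked
+ # mkvdisk-style create call below.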
+ get_existing_vdisk_mock.return_value = {
+ 'id': '101', 'name': 'test_rname', 'IO_group_id': '0', 'IO_group_name': 'io_grp0', 'status': 'online',
+ 'mdisk_grp_id': '1', 'mdisk_grp_name': 'AnsibleMaster', 'capacity': '536870912', 'type': 'striped',
+ 'formatted': 'yes', 'formatting': 'no', 'mdisk_id': '', 'mdisk_name': '', 'FC_id': 'many',
+ 'FC_name': 'many', 'RC_id': '101', 'RC_name': 'rcopy20', 'vdisk_UID': '60050768108101C7C0000000000005D9',
+ 'preferred_node_id': '1', 'fast_write_state': 'empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '', 'mirror_write_priority': 'latency',
+ 'RC_change': 'no', 'compressed_copy_count': '0', 'access_IO_group_count': '1', 'last_access_time': '201123133855',
+ 'parent_mdisk_grp_id': '1', 'parent_mdisk_grp_name': 'AnsibleMaster', 'owner_type': 'none', 'owner_id': '',
+ 'owner_name': '', 'encrypt': 'no', 'volume_id': '101', 'volume_name': 'test_cvname', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '', 'volume_group_id': '',
+ 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '', 'cloud_account_name': '',
+ 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none', 'backup_grain_size': '',
+ 'deduplicated_copy_count': '0', 'protocol': 'scsi', 'copy_id': '0', 'sync': 'yes', 'auto_delete': 'no',
+ 'primary': 'yes', 'used_capacity': '536870912', 'real_capacity': '536870912', 'free_capacity': '0',
+ 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '', 'se_copy': 'no', 'easy_tier': 'on',
+ 'easy_tier_status': 'balanced', 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0'},
+ {'tier': 'tier0_flash', 'tier_capacity': '536870912'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '0'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '536870912',
+ 'deduplicated_copy': 'no', 'used_capacity_before_reduction': ''
+ }
+ svc_run_command_mock.return_value = {
+ 'message': {
+ 'name': 'test_cvname', 'mdiskgrp': 'SRA-DR-POOL', 'size': '536870912', 'unit': 'b',
+ 'rsize': '0%', 'autoexpand': True, 'iogrp': 'io_grp0'
+ }
+ }
+ obj = IBMSVCchangevolume()
+ return_data = obj.change_volume_create()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_detach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_change_volume(self, sam, gevm, germ, cv_detach_mock, cv_delete_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'absent',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
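+ # Mocks are injected bottom-up from the decorators: sam=_svc_authorize, gevm=get_existing_vdisk, germ=get_existing_rc.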
+ gevm.return_value = {
+ 'id': '101', 'name': 'test_cvname', 'IO_group_id': '0', 'IO_group_name': 'io_grp0', 'status': 'online',
+ 'mdisk_grp_id': '1', 'mdisk_grp_name': 'AnsibleMaster', 'capacity': '536870912', 'type': 'striped',
+ 'formatted': 'yes', 'formatting': 'no', 'mdisk_id': '', 'mdisk_name': '', 'FC_id': 'many',
+ 'FC_name': 'many', 'RC_id': '101', 'RC_name': 'rcopy20', 'vdisk_UID': '60050768108101C7C0000000000005D9',
+ 'preferred_node_id': '1', 'fast_write_state': 'empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '', 'mirror_write_priority': 'latency',
+ 'RC_change': 'no', 'compressed_copy_count': '0', 'access_IO_group_count': '1', 'last_access_time': '201123133855',
+ 'parent_mdisk_grp_id': '1', 'parent_mdisk_grp_name': 'AnsibleMaster', 'owner_type': 'none', 'owner_id': '',
+ 'owner_name': '', 'encrypt': 'no', 'volume_id': '101', 'volume_name': 'test_cvname', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '', 'volume_group_id': '',
+ 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '', 'cloud_account_name': '',
+ 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none', 'backup_grain_size': '',
+ 'deduplicated_copy_count': '0', 'protocol': 'scsi', 'copy_id': '0', 'sync': 'yes', 'auto_delete': 'no',
+ 'primary': 'yes', 'used_capacity': '536870912', 'real_capacity': '536870912', 'free_capacity': '0',
+ 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '', 'se_copy': 'no', 'easy_tier': 'on',
+ 'easy_tier_status': 'balanced', 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0'},
+ {'tier': 'tier0_flash', 'tier_capacity': '536870912'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '0'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '536870912', 'deduplicated_copy': 'no',
+ 'used_capacity_before_reduction': ''
+ }
+ germ.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
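+ # Detach and delete are stubbed out; apply() should still report a change when removing an existing change volume.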
+ cv_detach_mock.return_value = None
+ cv_delete_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_detach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_non_existing_change_volume(self, sam, gevm, gerc, cv_detach_mock, cv_delete_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'absent',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
+ gevm.return_value = {}
+ gerc.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ cv_detach_mock.return_value = None
+ cv_delete_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_attach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_change_volume(self, sam, gevm, germ, cv_create_mock, cv_attach_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
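+ # No change volume exists yet (empty dict) but the relationship does, so apply() should create and attach one.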
+ gevm.return_value = {}
+ germ.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ cv_create_mock.return_value = None
+ cv_attach_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_attach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_change_volume_when_rel_absent(self, sam, gevm, germ, cv_create_mock, cv_attach_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
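+ # Neither the change volume nor the relationship exists; a change volume cannot be created without a relationship, so apply() fails.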
+ gevm.return_value = {}
+ germ.return_value = {}
+ cv_create_mock.return_value = None
+ cv_attach_mock.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_attach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_when_change_volume_absent(self, sam, gevm, germ, cv_create_mock, cv_attach_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'absent',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
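+ # Deleting an already-absent change volume is a no-op, so 'changed' must be False.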
+ gevm.return_value = {}
+ germ.return_value = {}
+ cv_create_mock.return_value = None
+ cv_attach_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_detach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_for_failure_when_copytype_not_global(self, sam, gevm, cvpm, germ, cv_detach_mock, cv_delete_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
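+ # The relationship below uses copy_type 'metro'; change volumes are only valid for global-mirror relationships, so apply() must fail.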
+ gevm.return_value = {
+ 'id': '101', 'name': 'test_cvname', 'IO_group_id': '0', 'IO_group_name': 'io_grp0', 'status': 'online',
+ 'mdisk_grp_id': '1', 'mdisk_grp_name': 'AnsibleMaster', 'capacity': '536870912', 'type': 'striped',
+ 'formatted': 'yes', 'formatting': 'no', 'mdisk_id': '', 'mdisk_name': '', 'FC_id': 'many',
+ 'FC_name': 'many', 'RC_id': '101', 'RC_name': 'rcopy20', 'vdisk_UID': '60050768108101C7C0000000000005D9',
+ 'preferred_node_id': '1', 'fast_write_state': 'empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '', 'mirror_write_priority': 'latency',
+ 'RC_change': 'no', 'compressed_copy_count': '0', 'access_IO_group_count': '1', 'last_access_time': '201123133855',
+ 'parent_mdisk_grp_id': '1', 'parent_mdisk_grp_name': 'AnsibleMaster', 'owner_type': 'none', 'owner_id': '',
+ 'owner_name': '', 'encrypt': 'no', 'volume_id': '101', 'volume_name': 'test_cvname', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '', 'volume_group_id': '',
+ 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '', 'cloud_account_name': '',
+ 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none', 'backup_grain_size': '',
+ 'deduplicated_copy_count': '0', 'protocol': 'scsi', 'copy_id': '0', 'sync': 'yes', 'auto_delete': 'no',
+ 'primary': 'yes', 'used_capacity': '536870912', 'real_capacity': '536870912', 'free_capacity': '0',
+ 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '', 'se_copy': 'no', 'easy_tier': 'on',
+ 'easy_tier_status': 'balanced', 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0'},
+ {'tier': 'tier0_flash', 'tier_capacity': '536870912'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '0'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '536870912', 'deduplicated_copy': 'no',
+ 'used_capacity_before_reduction': ''
+ }
+ germ.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "metro", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ cv_detach_mock.return_value = None
+ cv_delete_mock.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_attach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_detach')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.change_volume_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_cv.IBMSVCchangevolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_change_volume_update(self, sam, gevm, cvpm, germ, cv_detach_mock, cv_delete_mock, cva):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'cvname': 'test_cvname',
+ 'basevolume': 'test_base_volume',
+ 'state': 'present',
+ 'rname': 'test_rname',
+ 'ismaster': 'true'
+ })
+ gevm.return_value = {
+ 'id': '101', 'name': 'test_cvname', 'IO_group_id': '0', 'IO_group_name': 'io_grp0', 'status': 'online',
+ 'mdisk_grp_id': '1', 'mdisk_grp_name': 'AnsibleMaster', 'capacity': '536870912', 'type': 'striped',
+ 'formatted': 'yes', 'formatting': 'no', 'mdisk_id': '', 'mdisk_name': '', 'FC_id': 'many',
+ 'FC_name': 'many', 'RC_id': '101', 'RC_name': 'rcopy20', 'vdisk_UID': '60050768108101C7C0000000000005D9',
+ 'preferred_node_id': '1', 'fast_write_state': 'empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '', 'mirror_write_priority': 'latency',
+ 'RC_change': 'no', 'compressed_copy_count': '0', 'access_IO_group_count': '1', 'last_access_time': '201123133855',
+ 'parent_mdisk_grp_id': '1', 'parent_mdisk_grp_name': 'AnsibleMaster', 'owner_type': 'none', 'owner_id': '',
+ 'owner_name': '', 'encrypt': 'no', 'volume_id': '101', 'volume_name': 'test_cvname', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '', 'volume_group_id': '',
+ 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '', 'cloud_account_name': '',
+ 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none', 'backup_grain_size': '',
+ 'deduplicated_copy_count': '0', 'protocol': 'scsi', 'copy_id': '0', 'sync': 'yes', 'auto_delete': 'no',
+ 'primary': 'yes', 'used_capacity': '536870912', 'real_capacity': '536870912', 'free_capacity': '0',
+ 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '', 'se_copy': 'no', 'easy_tier': 'on',
+ 'easy_tier_status': 'balanced', 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0'},
+ {'tier': 'tier0_flash', 'tier_capacity': '536870912'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '0'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '536870912', 'deduplicated_copy': 'no',
+ 'used_capacity_before_reduction': ''
+ }
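+ # Same relationship data as the failure case above, except copy_type is 'global', which permits the change-volume update path.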
+ germ.return_value = {
+ "id": "305", "name": "test_cvname", "master_cluster_id": "00000204204071F0",
+ "master_cluster_name": "Cluster_altran-stand5", "master_vdisk_id": "305",
+ "master_vdisk_name": "master34", "aux_cluster_id": "00000204202071BC",
+ "aux_cluster_name": "aux_cluster_name", "aux_vdisk_id": "197",
+ "aux_vdisk_name": "aux34", "primary": "master", "consistency_group_id": "19 ",
+ "consistency_group_name": "test_name", "state": "consistent_synchronized",
+ "bg_copy_priority": "50", "progress": "", "freeze_time": "", "status": "online",
+ "sync": "", "copy_type": "global", "cycling_mode": "", "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "", "master_change_vdisk_name": "", "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "", "previous_primary": "", "channel": "none"
+ }
+ cv_detach_mock.return_value = None
+ cv_delete_mock.return_value = None
+ cva.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCchangevolume()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py
new file mode 100644
index 000000000..3bd145d94
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_flashcopy.py
@@ -0,0 +1,837 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_flashcopy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_flashcopy import IBMSVCFlashcopy
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCFlashcopy(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCFlashcopy()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_run_command(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
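+ # run_command() takes a (command, options, cmdargs) triple, mirrored by 'arg' below.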
+ arg = ["lsvdisk", {'bytes': True, 'filtervalue': 'name=test_source'}, None]
+ svc_obj_info_mock.return_value = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "Ans_n7",
+ "target_vdisk_id": "323", "target_vdisk_name": "target_vdisk", "group_id": "1", "group_name": "test_group",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "0", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "256", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.run_command(arg)
+ self.assertEqual("test_name", data["name"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_gather_data_with_state_present(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ svc_run_command_mock.return_value = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "Ans_n7",
+ "target_vdisk_id": "323", "target_vdisk_name": "target_vdisk", "group_id": "1", "group_name": "test_group",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "0", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "256", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.gather_data()
+ self.assertEqual("test_name", data["name"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_gather_data_with_state_absent(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "Ans_n7",
+ "target_vdisk_id": "323", "target_vdisk_name": "target_vdisk", "group_id": "1", "group_name": "test_group",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "0", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "256", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.gather_data()
+ self.assertEqual(data[0]["name"], "test_name")
+
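+ # NOTE: the two tests below are disabled by being wrapped in a string literal.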
+ """
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_fcmapping(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "Ans_n7",
+ "target_vdisk_id": "323", "target_vdisk_name": "target_vdisk", "group_id": "1", "group_name": "test_group",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "0", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "256", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.get_existing_fcmapping()
+ self.assertEqual("test_name", data["name"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_vdisk(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ "id": "500", "name": "test_source", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ },
+ {
+ "id": "501", "name": "test_target", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster", "capacity": "10737418240", "type": "striped", "FC_id": "",
+ "FC_name": "", "RC_id": "501", "RC_name": "rcopy_9", "vdisk_UID": "60050768108101C7C0000000000009D1",
+ "fc_map_count": "0", "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1",
+ "RC_change": "no", "compressed_copy_count": "0", "parent_mdisk_grp_id": "1",
+ "parent_mdisk_grp_name": "AnsibleMaster", "owner_id": "", "owner_name": "", "formatting": "no",
+ "encrypt": "no", "volume_id": "501", "volume_name": "master_vol_9", "function": "master", "protocol": ""
+ },
+ {
+ "id": "502", "name": "test_target_temp_xxxx", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "",
+ "RC_id": "502", "RC_name": "rcopy_10", "vdisk_UID": "60050768108101C7C0000000000009D2", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no", "compressed_copy_count": "0",
+ "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster", "owner_id": "", "owner_name": "",
+ "formatting": "no", "encrypt": "no", "volume_id": "502", "volume_name": "master_vol_10", "function": "master", "protocol": ""
+ }
+ ]
+ obj = IBMSVCFlashcopy()
+ data = obj.get_existing_vdisk()
+ self.assertEqual('test_source', data[0]['name'])
+ self.assertEqual('test_target', data[1]['name'])
+ self.assertEqual('test_target_temp_xxxx', data[2][0])
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_target_create(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '324',
+ 'message': 'Volume, id [324], successfully created'
+ }
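+ # Temporary target names follow '<target>_temp_<timestamp>'; the volume is created under this name and renamed once the mapping exists.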
+ temp_target_name_arg = 'test_target_temp_1609848271.2538939'
+ sdata_arg = {
+ 'id': '146', 'name': 'test_source', 'IO_group_id': '0', 'IO_group_name': 'io_grp0', 'status': 'online',
+ 'mdisk_grp_id': '1', 'mdisk_grp_name': 'AnsibleMaster', 'capacity': '1073741824', 'type': 'striped',
+ 'FC_id': '', 'FC_name': '', 'RC_id': '', 'RC_name': '', 'vdisk_UID': '60050768108101C7C0000000000009E1',
+ 'fc_map_count': '0', 'copy_count': '1', 'fast_write_state': 'empty', 'se_copy_count': '0', 'RC_change': 'no',
+ 'compressed_copy_count': '0', 'parent_mdisk_grp_id': '1', 'parent_mdisk_grp_name': 'AnsibleMaster',
+ 'owner_id': '', 'owner_name': '', 'formatting': 'no', 'encrypt': 'no', 'volume_id': '146',
+ 'volume_name': 'test_source', 'function': '', 'protocol': ''
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.target_create(temp_target_name_arg, sdata_arg)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcmap_create(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '39',
+ 'message': 'FlashCopy Mapping, id [39], successfully created'
+ }
+ temp_target_name_arg = 'test_target_temp_1609848271.2538939'
+ obj = IBMSVCFlashcopy()
+ data = obj.fcmap_create(temp_target_name_arg)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcmap_delete(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopy()
+ data = obj.fcmap_delete()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rename_temp_to_target(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ temp_target_name_arg = 'test_target_temp_1609848271.2538939'
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopy()
+ data = obj.rename_temp_to_target(temp_target_name_arg)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcmap_probe(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 256,
+ })
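+ # fcmap_probe() diffs desired vs. actual state: the map below has copy_rate 0 and group 'test_group', so both fields need updating.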
+ data_arg = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "test_source",
+ "target_vdisk_id": "323", "target_vdisk_name": "test_target", "group_id": "1", "group_name": "test_group",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "0", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "256", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.fcmap_probe(data_arg)
+ self.assertEqual('test_consistgrp', data['consistgrp'])
+ self.assertEqual('50', data['copyrate'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcmap_rename(self, mock_auth, mock_old, mock_cmd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present'
+ })
+ mock_old.return_value = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "test_source",
+ "target_vdisk_id": "323", "target_vdisk_name": "test_target", "group_id": "1", "group_name": "test_consistgrp",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "50", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "64", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ mock_cmd.return_value = None
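+ # svc_obj_info returns data for every lookup, so the new name appears to exist already and the rename is expected to fail.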
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.flashcopy_rename()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_fcmap_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ modify_arg = {
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50
+ }
+ obj = IBMSVCFlashcopy()
+ data = obj.fcmap_update(modify_arg)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.rename_temp_to_target')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.fcmap_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.target_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_creating_fcmap(self, svc_authorize_mock, svc_run_command_mock, gd, tcm, fcm, rtttm):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ sdata = {
+ "id": "500", "name": "test_source", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
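+ # gather_data() returns a 4-tuple: (existing fcmap, source vdisk data, target vdisk data, temp-named targets).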
+ gd.return_value = ({}, [sdata], None, [])
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_creating_existing_fcmap(self, svc_authorize_mock, svc_run_command_mock, gd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
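+ # The existing map already matches the requested copyrate and consistgrp, so no change is expected.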
+ fdata = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "test_source",
+ "target_vdisk_id": "323", "target_vdisk_name": "test_target", "group_id": "1", "group_name": "test_consistgrp",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "50", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "64", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ sdata = {
+ "id": "500", "name": "test_source", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ tdata = {
+ "id": "500", "name": "test_target", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ gd.return_value = (fdata, sdata, tdata, [])
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(False, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_updating_existing_fcmap(self, svc_authorize_mock, svc_run_command_mock, gd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
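+ # The existing map differs (copy_rate 100, group 'new_consistgrp'), so an update is required and 'changed' is True.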
+ fdata = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "test_source",
+ "target_vdisk_id": "323", "target_vdisk_name": "test_target", "group_id": "1", "group_name": "new_consistgrp",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "100", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "64", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ sdata = {
+ "id": "500", "name": "test_source", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ tdata = {
+ "id": "500", "name": "test_target", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ gd.return_value = (fdata, sdata, tdata, [])
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_with_source_missing(self, svc_authorize_mock, svc_run_command_mock, gd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
+ gd.return_value = (None, None, None, [])
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.rename_temp_to_target')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.fcmap_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.target_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_with_more_temp_vdisk(self, svc_authorize_mock, svc_run_command_mock, gd, tcm, fcm, rtttm):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 50,
+ 'grainsize': 64,
+ })
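+ # Two leftover temp-named targets match the pattern; the module cannot choose between them, so apply() fails.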
+ sdata = {
+ "id": "500", "name": "test_source", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ temp1 = {
+ "id": "500", "name": "test_target_temp_1609848271.2538939", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ temp2 = {
+ "id": "500", "name": "test_target_temp_1609848272.2538939", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "1", "mdisk_grp_name": "AnsibleMaster",
+ "capacity": "10737418240", "type": "striped", "FC_id": "", "FC_name": "", "RC_id": "500",
+ "RC_name": "rcopy_8", "vdisk_UID": "60050768108101C7C0000000000009D0", "fc_map_count": "0",
+ "copy_count": "1", "fast_write_state": "not_empty", "se_copy_count": "1", "RC_change": "no",
+ "compressed_copy_count": "0", "parent_mdisk_grp_id": "1", "parent_mdisk_grp_name": "AnsibleMaster",
+ "owner_id": "", "owner_name": "", "formatting": "no", "encrypt": "no", "volume_id": "500",
+ "volume_name": "master_vol_8", "function": "master", "protocol": ""
+ }
+ gd.return_value = ({}, sdata, None, [temp1, temp2])
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deleting_existing_fcmap(self, svc_authorize_mock, svc_run_command_mock, gd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ })
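+ # An existing mapping is found, so state=absent deletes it and reports a change.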
+ fdata = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "Ans_n7",
+ "target_vdisk_id": "323", "target_vdisk_name": "target_vdisk", "group_id": "1", "group_name": "new_consistgrp",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "100", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "64", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ gd.return_value = [fdata, None, None, []]
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_flashcopy.IBMSVCFlashcopy.gather_data')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deleting_non_existing_fcmap(self, svc_authorize_mock, svc_run_command_mock, gd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name'
+ })
+ gd.return_value = [{}, None, None, []]
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(False, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_create_with_missing_parameter(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_with_copyrate_outside_range(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'copytype': 'snapshot',
+ 'source': 'test_source',
+ 'target': 'test_target',
+ 'mdiskgrp': 'test_mdiskgrp',
+ 'consistgrp': 'test_consistgrp',
+ 'copyrate': 500,
+ 'grainsize': 64,
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCFlashcopy()
+ data = obj.apply()
+
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ip.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ip.py
new file mode 100644
index 000000000..f15780870
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ip.py
@@ -0,0 +1,742 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_usergroup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_ip import IBMSVCIp
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCIp(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
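+        # _svc_authorize is patched by the decorator, so this REST client never
+        # contacts 1.2.3.4; it only exercises object construction.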
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCIp()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ ip = IBMSVCIp()
+ data = ip.basic_checks()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_ip_info(self, mock_svc_authorize, soim):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 'portset0',
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ soim.return_value = [
+ {
+ "id": "0",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "0",
+ "portset_name": "portset0",
+ "IP_address": "10.0.1.1",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "1",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "1",
+ "portset_name": "portset1",
+ "IP_address": "10.0.1.2",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "2",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "2",
+ "portset_name": "portset2",
+ "IP_address": "10.0.1.3",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "3",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "3",
+ "portset_name": "portset3",
+ "IP_address": "10.0.1.4",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "4",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "4",
+ "portset_name": "Portset4",
+ "IP_address": "10.0.1.5",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ }
+ ]
+ ip = IBMSVCIp()
+ data = ip.get_ip_info()
+ self.assertEqual(data[0]["IP_address"], "10.0.1.1")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_ip(self, mock_svc_authorize, srcm):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ srcm.return_value = {
+ 'id': '0',
+ 'message': 'IP Address, id [0], successfully created'
+ }
+ ip = IBMSVCIp()
+ data = ip.create_ip()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_ip(self, mock_svc_authorize, srcm):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'absent'
+ })
+ srcm.return_value = None
+ ip = IBMSVCIp()
+ data = ip.remove_ip(0)
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_when_state_absent(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_when_node_absent(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_when_port_absent(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_when_portset_absent(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_when_ip_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_node_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_port_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_portset_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'ip_address': '10.0.1.1',
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+        self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_ip_missing(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_subnet_present(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_gateway_present(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'gateway': '10.10.10.10',
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_vlan_present(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'vlan': 1,
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_when_shareip_present(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'shareip': True,
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_creation(self, mock_svc_authorize, soi, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'present'
+ })
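+        # No existing IPs are reported, so apply() should take the creation path
+        # and exit with changed=True.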
+ soi.return_value = []
+ src.return_value = {
+ 'id': '0',
+ 'message': 'IP Address, id [0], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+        self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deletion(self, mock_svc_authorize, soi, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'portset': 0,
+ 'ip_address': '10.0.1.1',
+ 'state': 'absent'
+ })
+ soi.return_value = [
+ {
+ "id": "0",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "0",
+ "portset_name": "portset0",
+ "IP_address": "10.0.1.1",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "1",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "1",
+ "portset_name": "portset1",
+ "IP_address": "10.0.1.2",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "2",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "2",
+ "portset_name": "portset2",
+ "IP_address": "10.0.1.3",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "3",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "3",
+ "portset_name": "portset3",
+ "IP_address": "10.0.1.4",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "4",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "4",
+ "portset_name": "Portset4",
+ "IP_address": "10.0.1.5",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ }
+ ]
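+        # The first listed entry matches the requested node/port/portset/IP, so
+        # apply() should remove it and report changed=True.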
+ src.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertEqual(exc.value.args[0]['changed'], True)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_deletion_when_multiple_IP_detected(self, mock_svc_authorize, soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'node': 'node1',
+ 'port': 1,
+ 'ip_address': '10.0.1.1',
+ 'subnet_prefix': 20,
+ 'gateway': '10.10.10.10',
+ 'vlan': 1,
+ 'shareip': True,
+ 'state': 'absent'
+ })
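+        # Entries id 0 and id 1 below report the same node/port/portset/IP, so
+        # the module cannot single out one address to remove and must fail.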
+ soi.return_value = [
+ {
+ "id": "0",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "0",
+ "portset_name": "portset0",
+ "IP_address": "10.0.1.1",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "1",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "0",
+ "portset_name": "portset0",
+ "IP_address": "10.0.1.1",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "2",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "2",
+ "portset_name": "portset2",
+ "IP_address": "10.0.1.3",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "3",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "3",
+ "portset_name": "portset3",
+ "IP_address": "10.0.1.4",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ },
+ {
+ "id": "4",
+ "node_id": "1",
+ "node_name": "node1",
+ "port_id": "1",
+ "portset_id": "4",
+ "portset_name": "Portset4",
+ "IP_address": "10.0.1.5",
+ "prefix": "20",
+ "vlan": "",
+ "gateway": "",
+ "owner_id": "",
+ "owner_name": ""
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ ip = IBMSVCIp()
+ ip.apply()
+ self.assertEqual(exc.value.args[0]['failed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_migration.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_migration.py
new file mode 100644
index 000000000..1f8f991ef
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_migration.py
@@ -0,0 +1,1613 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_migration """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_migration import IBMSVCMigrate
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCMigrate(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCMigrate()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_discover_partner_system(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = {
+ "id": "0000010022206192", "name": "Cluster_x.x.x.x", "location": "remote",
+ "partnership": "fully_configured", "code_level": "8.4.2.0 (build 154.19.2106231326000)",
+ "console_IP": "9.71.42.198:443", "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0", "gm_intra_cluster_delay_simulation": "0",
+ "relationship_bandwidth_limit": "25", "gm_max_host_delay": "5", "type": "fc",
+ "cluster_ip": "", "chap_secret": "", "event_log_sequence": "", "link_bandwidth_mbits": "100",
+ "background_copy_rate": "50", "max_replication_delay": "0", "compressed": "no", "link1": "",
+ "link2": "", "link1_ip_id": "", "link2_ip_id": ""
+ }
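+        # The assertion below implies discover_partner_system() strips the port
+        # from console_IP ("9.71.42.198:443") and returns the bare address.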
+ m = IBMSVCMigrate()
+ data = m.discover_partner_system()
+ self.assertEqual(data, '9.71.42.198')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_partner_local(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = {
+ "id": "0000010022206192", "name": "Cluster_x.x.x.x", "location": "local",
+ "partnership": "fully_configured", "code_level": "8.4.2.0 (build 154.19.2106231326000)",
+ "console_IP": "9.71.42.198:443", "gm_link_tolerance": "300",
+ "gm_inter_cluster_delay_simulation": "0", "gm_intra_cluster_delay_simulation": "0",
+ "relationship_bandwidth_limit": "25", "gm_max_host_delay": "5", "type": "fc",
+ "cluster_ip": "", "chap_secret": "", "event_log_sequence": "", "link_bandwidth_mbits": "100",
+ "background_copy_rate": "50", "max_replication_delay": "0", "compressed": "no", "link1": "",
+ "link2": "", "link1_ip_id": "", "link2_ip_id": ""
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ data = m.discover_partner_system()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_partner_absent(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ data = m.discover_partner_system()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.discover_partner_system')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_construct_remote_rest(self, auth, dps):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ dps.return_value = "9.71.42.198"
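+        # With the partner address mocked, construct_remote_rest() is expected to
+        # build a REST client for the remote cluster from the remote credentials.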
+ m = IBMSVCMigrate()
+ data = m.construct_remote_rest()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.construct_remote_rest')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_vdisk(self, auth, cmd1, crr, cmd2):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ cmd2.return_value = None
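+        # The vdisk lookups are mocked so that the source volume exists while the
+        # remote target comes back empty; the test only checks the call completes.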
+ m = IBMSVCMigrate()
+ m.get_existing_vdisk()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, auth):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ m = IBMSVCMigrate()
+ m.basic_checks()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_for_missing_token_parameter(self, auth):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ # "username": "username",
+ # "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.basic_checks()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_for_missing_initiate_parameters(self, auth):
+ set_module_args({
+ # "source_volume": "tesla",
+ # "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ # "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ # "remote_username": "remote_username",
+ # "remote_password": "remote_password",
+ # "relationship_name": "migrate_tesla",
+ # "remote_pool": "site2pool1"
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.basic_checks()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_for_missing_switch_parameters(self, auth):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ # "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.basic_checks()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_for_missing_cleanup_parameters(self, auth):
+ set_module_args({
+ # "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.basic_checks()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_source_hosts(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = [
+ {
+ "id": "69",
+ "name": "tesla",
+ "SCSI_id": "3",
+ "host_id": "0",
+ "host_name": "altran-esxi-06-iscsi",
+ "vdisk_UID": "60050768108180ED7000000000000388",
+ "IO_group_id": "0",
+ "IO_group_name": "io_grp0",
+ "mapping_type": "private",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi"
+ },
+ {
+ "id": "69",
+ "name": "tesla",
+ "SCSI_id": "0",
+ "host_id": "86",
+ "host_name": "host_x",
+ "vdisk_UID": "60050768108180ED7000000000000388",
+ "IO_group_id": "0",
+ "IO_group_name": "io_grp0",
+ "mapping_type": "private",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi"
+ }
+ ]
+ m = IBMSVCMigrate()
+ data = m.get_source_hosts()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.discover_partner_system')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_map_host_vol_remote(self, auth, dps, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ dps.return_value = "9.71.42.198"
+ cmd1.return_value = {
+ "id": "2",
+ "message": "Host, id [2], successfully created"
+ }
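+        # svc_run_command is mocked with a generic success acknowledgement; the
+        # test only verifies that map_host_vol_remote() runs without raising.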
+ m = IBMSVCMigrate()
+ argument = ['host1']
+ m.map_host_vol_remote(argument)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.discover_partner_system')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_return_remote_hosts(self, auth, dps, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ dps.return_value = "9.71.42.198"
+ cmd1.return_value = [
+ {
+ "id": "0",
+ "name": "altran-hv-1",
+ "port_count": "2",
+ "iogrp_count": "4",
+ "status": "degraded",
+ "site_id": "",
+ "site_name": "",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ },
+ {
+ "id": "1",
+ "name": "altran-esxi-06-iscsi",
+ "port_count": "2",
+ "iogrp_count": "4",
+ "status": "online",
+ "site_id": "1",
+ "site_name": "site1",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ },
+ {
+ "id": "3",
+ "name": "altran-esxi-07-iscsi",
+ "port_count": "2",
+ "iogrp_count": "4",
+ "status": "online",
+ "site_id": "2",
+ "site_name": "site2",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ }
+ ]
+ m = IBMSVCMigrate()
+ data = m.return_remote_hosts()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.create_remote_hosts')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_replicate_source_hosts(self, auth, cmd1, crr):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ argument = ["tesla"]
+ argument = [
+ {
+ "id": "69",
+ "name": "tesla",
+ "SCSI_id": "3",
+ "host_id": "0",
+ "host_name": "altran-esxi-06-iscsi",
+ "vdisk_UID": "60050768108180ED7000000000000388",
+ "IO_group_id": "0",
+ "IO_group_name": "io_grp0",
+ "mapping_type": "private",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi"
+ }
+ ]
+ cmd1.return_value = {
+ "id": "0",
+ "name": "altran-esxi-06-iscsi",
+ "port_count": "2",
+ "type": "generic",
+ "mask": "1111111111111111111111111111111111111111111111111111111111111111",
+ "iogrp_count": "4",
+ "status": "online",
+ "site_id": "1",
+ "site_name": "site1",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "status_policy": "redundant",
+ "status_site": "all",
+ "nodes": [
+ {
+ "WWPN": "2100000E1EC228B9",
+ "node_logged_in_count": "4",
+ "state": "active"
+ },
+ {
+ "WWPN": "2100000E1EC228B8",
+ "node_logged_in_count": "2",
+ "state": "active"
+ }
+ ],
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ }
+ m = IBMSVCMigrate()
+ m.replicate_source_hosts(argument)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.map_host_vol_remote')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.construct_remote_rest')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.return_remote_hosts')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_remote_hosts(self, auth, rrh, crr, src, mhvr):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ argument_1 = {
+ "altran-esxi-06-iscsi": ["2100000E1EC228B9", "2100000E1EC228B8"],
+ "host_x": ["50050768100225E8", "50050768100125E8"]
+ }
+ argument_2 = {}
+ rrh.return_value = [
+ {
+ "id": "0",
+ "name": "altran-hv-1",
+ "port_count": "2",
+ "iogrp_count": "4",
+ "status": "degraded",
+ "site_id": "",
+ "site_name": "",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ },
+ {
+ "id": "1",
+ "name": "altran-esxi-06-iscsi",
+ "port_count": "2",
+ "iogrp_count": "4",
+ "status": "online",
+ "site_id": "1",
+ "site_name": "site1",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ },
+ {
+ "id": "3",
+ "name": "altran-esxi-07-iscsi",
+ "port_count": "2",
+ "iogrp_count": "4",
+ "status": "online",
+ "site_id": "2",
+ "site_name": "site2",
+ "host_cluster_id": "",
+ "host_cluster_name": "",
+ "protocol": "scsi",
+ "owner_id": "",
+ "owner_name": "",
+ "portset_id": "",
+ "portset_name": ""
+ }
+ ]
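+        # Of the two hosts passed in, "altran-esxi-06-iscsi" already exists on
+        # the mocked remote system while "host_x" does not, so presumably only
+        # the latter needs to be created remotely.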
+ m = IBMSVCMigrate()
+ m.create_remote_hosts(argument_1, argument_2)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.construct_remote_rest')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vdisk_create(self, auth, crr, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ argument = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "", "RC_name": "",
+ "vdisk_UID": "60050768108180ED700000000000038E", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "210811071953", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "", "owner_name": "",
+ "encrypt": "no", "volume_id": "69", "volume_name": "tesla", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "",
+ "volume_group_name": "", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "",
+ "restore_status": "none", "backup_grain_size": "", "deduplicated_copy_count": "0",
+ "protocol": "scsi", "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100", "autoexpand": "",
+ "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ cmd1.return_value = {
+ "id": "77",
+ "message": "Volume, id [77], successfully created"
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.vdisk_create(argument)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.construct_remote_rest')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_verify_remote_volume_mapping(self, auth, crr, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.verify_remote_volume_mapping()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.vdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_verify_target(self, auth, cmd1, cmd2):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ source_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ target_data = None
+ cmd1.return_value = source_data, target_data
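+        # Source exists and has an empty RC_name (not in a remote copy
+        # relationship) while the target is absent, so verify_target() should
+        # fall through to the mocked vdisk_create().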
+ m = IBMSVCMigrate()
+ m.verify_target()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.vdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_source_volume_absent(self, auth, cmd1, cmd2):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ source_data = None
+ target_data = None
+ cmd1.return_value = source_data, target_data
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.verify_target()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.vdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_when_source_volume_in_relationship(self, auth, cmd1, cmd2):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ source_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "x", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ target_data = None
+ cmd1.return_value = source_data, target_data
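+        # Here RC_name is "x": the source already belongs to a remote copy
+        # relationship, so verify_target() must fail.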
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.verify_target()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.vdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_when_target_volume_in_relationship(self, auth, cmd1, cmd2):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ source_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ target_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "x", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ cmd1.return_value = source_data, target_data
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.verify_target()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.vdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_when_target_exists_in_different_size(self, auth, cmd1, cmd2):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ source_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ target_data = [
+ {
+ "id": "69", "name": "tesla_target", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485766", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ cmd1.return_value = source_data, target_data
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.verify_target()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_relationship(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = {
+ "id": "69",
+ "message": "RC Relationship, id [69], successfully created"
+ }
+ m = IBMSVCMigrate()
+ m.create_relationship()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vol_relationship(self, auth, cmd1, cmd2):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "cleanup",
+ "source_volume": "tesla"
+ })
+ source_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ target_data = None
+ cmd1.return_value = source_data, target_data
+ cmd2.return_value = {
+ "id": "69",
+ "name": "migrate_tesla",
+ "master_cluster_id": "0000020420603B5C",
+ "master_cluster_name": "Cluster_x.x.x.x",
+ "master_vdisk_id": "69",
+ "master_vdisk_name": "tesla",
+ "aux_cluster_id": "0000010022206192",
+ "aux_cluster_name": "Cluster_x.x.x.x",
+ "aux_vdisk_id": "77",
+ "aux_vdisk_name": "tesla_target",
+ "primary": "aux",
+ "consistency_group_id": "",
+ "consistency_group_name": "",
+ "state": "consistent_synchronized",
+ "bg_copy_priority": "50",
+ "progress": "",
+ "freeze_time": "",
+ "status": "online",
+ "sync": "",
+ "copy_type": "migration",
+ "cycling_mode": "",
+ "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "",
+ "master_change_vdisk_name": "",
+ "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "",
+ "previous_primary": "",
+ "channel": "none"
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.source_vol_relationship("tesla")
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vol_relationship_fail_when_source_missing(self, auth, cmd1, cmd2):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "cleanup",
+ "source_volume": "tesla"
+ })
+ source_data = None
+ target_data = None
+ cmd1.return_value = source_data, target_data
+ cmd2.return_value = {
+ "id": "69",
+ "name": "migrate_tesla",
+ "master_cluster_id": "0000020420603B5C",
+ "master_cluster_name": "Cluster_x.x.x.x",
+ "master_vdisk_id": "69",
+ "master_vdisk_name": "tesla",
+ "aux_cluster_id": "0000010022206192",
+ "aux_cluster_name": "Cluster_x.x.x.x",
+ "aux_vdisk_id": "77",
+ "aux_vdisk_name": "tesla_target",
+ "primary": "aux",
+ "consistency_group_id": "",
+ "consistency_group_name": "",
+ "state": "consistent_synchronized",
+ "bg_copy_priority": "50",
+ "progress": "",
+ "freeze_time": "",
+ "status": "online",
+ "sync": "",
+ "copy_type": "migration",
+ "cycling_mode": "",
+ "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "",
+ "master_change_vdisk_name": "",
+ "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "",
+ "previous_primary": "",
+ "channel": "none"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ m = IBMSVCMigrate()
+ m.source_vol_relationship("tesla")
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_migration.IBMSVCMigrate.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vol_relationship_fail_when_source_copytype_not_migration(self, auth, cmd1, cmd2):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "cleanup",
+ "source_volume": "tesla"
+ })
+ source_data = [
+ {
+ "id": "69", "name": "tesla", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "capacity": "10485760", "type": "striped", "formatted": "yes", "formatting": "no",
+ "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "", "RC_id": "",
+ "RC_name": "", "vdisk_UID": "60050768108180ED7000000000000388", "preferred_node_id": "1",
+ "fast_write_state": "not_empty", "cache": "readwrite", "udid": "", "fc_map_count": "0",
+ "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "69", "volume_name": "tesla",
+ "function": "", "throttle_id": "", "throttle_name": "", "IOPs_limit": "",
+ "bandwidth_limit_MB": "", "volume_group_id": "", "volume_group_name": "",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "scsi",
+ "preferred_node_name": "node1", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "not_empty", "used_capacity": "10485760",
+ "real_capacity": "10485760", "free_capacity": "0", "overallocation": "100",
+ "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "10485760"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "10485760",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "encrypt": "no",
+ "deduplicated_copy": "no", "used_capacity_before_reduction": "",
+ "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ target_data = None
+ cmd1.return_value = source_data, target_data
+ cmd2.return_value = {
+ "id": "69",
+ "name": "migrate_tesla",
+ "master_cluster_id": "0000020420603B5C",
+ "master_cluster_name": "Cluster_x.x.x.x",
+ "master_vdisk_id": "69",
+ "master_vdisk_name": "tesla",
+ "aux_cluster_id": "0000010022206192",
+ "aux_cluster_name": "Cluster_x.x.x.x",
+ "aux_vdisk_id": "77",
+ "aux_vdisk_name": "tesla_target",
+ "primary": "aux",
+ "consistency_group_id": "",
+ "consistency_group_name": "",
+ "state": "consistent_synchronized",
+ "bg_copy_priority": "50",
+ "progress": "",
+ "freeze_time": "",
+ "status": "online",
+ "sync": "",
+ "copy_type": "",
+ "cycling_mode": "",
+ "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "",
+ "master_change_vdisk_name": "",
+ "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "",
+ "previous_primary": "",
+ "channel": "none"
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ m = IBMSVCMigrate()
+ m.source_vol_relationship("tesla")
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_existing_rc(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = None
+ m = IBMSVCMigrate()
+ m.existing_rc()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_verify_existing_rel(self, auth):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ argument = {
+ "id": "69",
+ "name": "migrate_tesla",
+ "master_cluster_id": "0000020420603B5C",
+ "master_cluster_name": "Cluster_x.x.x.x",
+ "master_vdisk_id": "69",
+ "master_vdisk_name": "tesla",
+ "aux_cluster_id": "0000010022206192",
+ "aux_cluster_name": "Cluster_x.x.x.x",
+ "aux_vdisk_id": "77",
+ "aux_vdisk_name": "tesla_target",
+ "primary": "aux",
+ "consistency_group_id": "",
+ "consistency_group_name": "",
+ "state": "consistent_synchronized",
+ "bg_copy_priority": "50",
+ "progress": "",
+ "freeze_time": "",
+ "status": "online",
+ "sync": "",
+ "copy_type": "migration",
+ "cycling_mode": "",
+ "cycle_period_seconds": "300",
+ "master_change_vdisk_id": "",
+ "master_change_vdisk_name": "",
+ "aux_change_vdisk_id": "",
+ "aux_change_vdisk_name": "",
+ "previous_primary": "",
+ "channel": "none"
+ }
+ m = IBMSVCMigrate()
+ m.verify_existing_rel(argument)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_relationship(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = ''
+ m = IBMSVCMigrate()
+ m.start_relationship()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_switch(self, auth, cmd1):
+ set_module_args({
+ "source_volume": "tesla",
+ "target_volume": "tesla_target",
+ "clustername": "x.x.x.x",
+ "remote_cluster": "Cluster_x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "initiate",
+ "replicate_hosts": True,
+ "remote_username": "remote_username",
+ "remote_password": "remote_password",
+ "relationship_name": "migrate_tesla",
+ "remote_pool": "site2pool1"
+ })
+ cmd1.return_value = ''
+ m = IBMSVCMigrate()
+ m.switch()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete(self, auth, cmd1):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "state": "cleanup",
+ "source_volume": "tesla"
+ })
+ cmd1.return_value = ''
+ m = IBMSVCMigrate()
+ m.delete()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_migrate_across_pool_without_mandatory_params(self, svc_authorize_mock, svc_obj_info_mock, svc_run_command_mock):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "type_of_migration": "across_pools"
+ })
+
+ m = IBMSVCMigrate()
+ with pytest.raises(AnsibleFailJson) as exc:
+ m.apply()
+ self.assertEqual(
+ 'Missing mandatory parameter: [new_pool, source_volume] for migration across pools',
+ exc.value.args[0]['msg']
+ )
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_migrate_across_pool(self, svc_authorize_mock, svc_obj_info_mock, svc_run_command_mock):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "type_of_migration": "across_pools",
+ "new_pool": "pool0",
+ "source_volume": "vol1"
+ })
+
+ svc_obj_info_mock.return_value = [{'name': 'vol1', 'mdisk_grp_name': 'pool1'}, {}]
+ svc_run_command_mock.return_value = ''
+ m = IBMSVCMigrate()
+ with pytest.raises(AnsibleExitJson) as exc:
+ m.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_migrate_across_pool_idempotency(self, svc_authorize_mock, svc_obj_info_mock, svc_run_command_mock):
+ set_module_args({
+ "clustername": "x.x.x.x",
+ "username": "username",
+ "password": "password",
+ "type_of_migration": "across_pools",
+ "new_pool": "pool0",
+ "source_volume": "vol1"
+ })
+
+ svc_obj_info_mock.return_value = [{'name': 'vol1', 'mdisk_grp_name': 'pool0'}, {}]
+ m = IBMSVCMigrate()
+ with pytest.raises(AnsibleExitJson) as exc:
+ m.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py
new file mode 100644
index 000000000..ec4af334a
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_mirrored_volume.py
@@ -0,0 +1,725 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_mirrored_volume """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_mirrored_volume import IBMSVCvolume
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
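+# A minimal usage sketch for the helper above (illustrative only; the argument
+# values are placeholders): set_module_args() stages the arguments in
+# basic._ANSIBLE_ARGS, which AnsibleModule reads at construction time in place
+# of stdin, so a test can instantiate a module class directly:
+#
+#     set_module_args({'clustername': 'x.x.x.x', 'username': 'u', 'password': 'p'})
+#     module = IBMSVCvolume()  # sees the staged arguments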
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
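+# A minimal sketch of the pattern these two callbacks enable (assuming the
+# module under test ends by calling exit_json or fail_json): each test invokes
+# the module inside pytest.raises and asserts on the packaged kwargs:
+#
+#     with pytest.raises(AnsibleFailJson) as exc:
+#         IBMSVCvolume()  # e.g. constructed with required arguments missing
+#     assert exc.value.args[0]['failed']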
+
+class TestIBMSVCvolume(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCvolume()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_vdisk(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': '1024'
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ 'id': '86', 'name': 'test_vol', 'IO_group_id': '0', 'IO_group_name': 'io_grp0',
+ 'status': 'online', 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'capacity': '1.00GB',
+ 'type': 'striped', 'formatted': 'no', 'formatting': 'yes', 'mdisk_id': '', 'mdisk_name': '',
+ 'FC_id': 'many', 'FC_name': 'many', 'RC_id': '86', 'RC_name': 'rcrel14',
+ 'vdisk_UID': '60050764008581864800000000000675', 'preferred_node_id': '1',
+ 'fast_write_state': 'not_empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '',
+ 'mirror_write_priority': 'latency', 'RC_change': 'no', 'compressed_copy_count': '0',
+ 'access_IO_group_count': '2', 'last_access_time': '', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'owner_type': 'none', 'owner_id': '', 'owner_name': '',
+ 'encrypt': 'no', 'volume_id': '86', 'volume_name': 'test_vol', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '',
+ 'volume_group_id': '', 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '',
+ 'cloud_account_name': '', 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none',
+ 'backup_grain_size': '', 'deduplicated_copy_count': '0', 'protocol': ''
+ },
+ {
+ 'copy_id': '0', 'status': 'online', 'sync': 'yes', 'auto_delete': 'no', 'primary': 'yes',
+ 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'type': 'striped', 'mdisk_id': '', 'mdisk_name': '',
+ 'fast_write_state': 'not_empty', 'used_capacity': '1.00GB', 'real_capacity': '1.00GB',
+ 'free_capacity': '0.00MB', 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '',
+ 'se_copy': 'no', 'easy_tier': 'on', 'easy_tier_status': 'balanced',
+ 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier0_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '1.00GB'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0.00MB'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '1.00GB', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'encrypt': 'no', 'deduplicated_copy': 'no', 'used_capacity_before_reduction': ''
+ }
+ ]
+ obj = IBMSVCvolume()
+ data = obj.get_existing_vdisk()
+ self.assertEqual('test_vol', data[0]["name"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, svc_authorize_mock, svc_obj_info_mock1, svc_obj_info_mock2):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': '1024'
+ })
+ vdisk_data = [
+ {
+ 'id': '86', 'name': 'test_vol', 'IO_group_id': '0', 'IO_group_name': 'io_grp0',
+ 'status': 'online', 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'capacity': '1.00GB',
+ 'type': 'striped', 'formatted': 'no', 'formatting': 'yes', 'mdisk_id': '', 'mdisk_name': '',
+ 'FC_id': 'many', 'FC_name': 'many', 'RC_id': '86', 'RC_name': 'rcrel14',
+ 'vdisk_UID': '60050764008581864800000000000675', 'preferred_node_id': '1',
+ 'fast_write_state': 'not_empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '',
+ 'mirror_write_priority': 'latency', 'RC_change': 'no', 'compressed_copy_count': '0',
+ 'access_IO_group_count': '2', 'last_access_time': '', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'owner_type': 'none', 'owner_id': '', 'owner_name': '',
+ 'encrypt': 'no', 'volume_id': '86', 'volume_name': 'test_vol', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '',
+ 'volume_group_id': '', 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '',
+ 'cloud_account_name': '', 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none',
+ 'backup_grain_size': '', 'deduplicated_copy_count': '0', 'protocol': ''
+ },
+ {
+ 'copy_id': '0', 'status': 'online', 'sync': 'yes', 'auto_delete': 'no', 'primary': 'yes',
+ 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'type': 'striped', 'mdisk_id': '', 'mdisk_name': '',
+ 'fast_write_state': 'not_empty', 'used_capacity': '1.00GB', 'real_capacity': '1.00GB',
+ 'free_capacity': '0.00MB', 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '',
+ 'se_copy': 'no', 'easy_tier': 'on', 'easy_tier_status': 'balanced',
+ 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier0_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '1.00GB'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0.00MB'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '1.00GB', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'encrypt': 'no', 'deduplicated_copy': 'no', 'used_capacity_before_reduction': ''
+ }
+ ]
+ svc_obj_info_mock1.return_value = {
+ 'id': '2', 'name': 'Pool1', 'status': 'online', 'mdisk_count': '1', 'vdisk_count': '30',
+ 'capacity': '553.00GB', 'extent_size': '1024', 'free_capacity': '474.00GB', 'virtual_capacity': '6.73GB',
+ 'used_capacity': '1.08GB', 'real_capacity': '51.47GB', 'overallocation': '1', 'warning': '80',
+ 'easy_tier': 'auto', 'easy_tier_status': 'balanced',
+ 'tiers': [
+ {
+ 'tier': 'tier_scm', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier0_flash', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier1_flash', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier_enterprise', 'tier_mdisk_count': '1', 'tier_capacity': '553.00GB', 'tier_free_capacity': '474.00GB'
+ },
+ {
+ 'tier': 'tier_nearline', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ }
+ ],
+ 'compression_active': 'yes', 'compression_virtual_capacity': '200.00MB', 'compression_compressed_capacity': '0.31MB',
+ 'compression_uncompressed_capacity': '0.00MB', 'site_id': '1', 'site_name': 'site1', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'child_mdisk_grp_count': '0', 'child_mdisk_grp_capacity': '0.00MB',
+ 'type': 'parent', 'encrypt': 'no', 'owner_type': 'none', 'owner_id': '', 'owner_name': '', 'data_reduction': 'no',
+ 'used_capacity_before_reduction': '0.00MB', 'used_capacity_after_reduction': '0.00MB', 'overhead_capacity': '0.00MB',
+ 'deduplication_capacity_saving': '0.00MB', 'reclaimable_capacity': '0.00MB', 'physical_capacity': '553.00GB',
+ 'physical_free_capacity': '474.00GB', 'shared_resources': 'no', 'vdisk_protection_enabled': 'yes',
+ 'vdisk_protection_status': 'inactive', 'easy_tier_fcm_over_allocation_max': '', 'auto_expand': 'no',
+ 'auto_expand_max_capacity': '0.00MB'
+ }
+ svc_obj_info_mock2.return_value = {
+ 'id': '3', 'name': 'Pool2', 'status': 'online', 'mdisk_count': '1', 'vdisk_count': '27', 'capacity': '2.72TB',
+ 'extent_size': '1024', 'free_capacity': '2.65TB', 'virtual_capacity': '6.44GB', 'used_capacity': '910.38MB',
+ 'real_capacity': '51.25GB', 'overallocation': '0', 'warning': '80', 'easy_tier': 'auto', 'easy_tier_status': 'balanced',
+ 'tiers': [
+ {
+ 'tier': 'tier_scm', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier0_flash', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier1_flash', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier_enterprise', 'tier_mdisk_count': '0', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'
+ },
+ {
+ 'tier': 'tier_nearline', 'tier_mdisk_count': '1', 'tier_capacity': '2.72TB', 'tier_free_capacity': '2.65TB'
+ }
+ ], 'compression_active': 'no', 'compression_virtual_capacity': '0.00MB', 'compression_compressed_capacity': '0.00MB',
+ 'compression_uncompressed_capacity': '0.00MB', 'site_id': '2', 'site_name': 'site2',
+ 'parent_mdisk_grp_id': '3', 'parent_mdisk_grp_name': 'Pool2', 'child_mdisk_grp_count': '0',
+ 'child_mdisk_grp_capacity': '0.00MB', 'type': 'parent', 'encrypt': 'no', 'owner_type': 'none',
+ 'owner_id': '', 'owner_name': '', 'data_reduction': 'no', 'used_capacity_before_reduction': '0.00MB',
+ 'used_capacity_after_reduction': '0.00MB', 'overhead_capacity': '0.00MB', 'deduplication_capacity_saving': '0.00MB',
+ 'reclaimable_capacity': '0.00MB', 'physical_capacity': '2.72TB', 'physical_free_capacity': '2.65TB',
+ 'shared_resources': 'no', 'vdisk_protection_enabled': 'yes', 'vdisk_protection_status': 'inactive',
+ 'easy_tier_fcm_over_allocation_max': '', 'auto_expand': 'no', 'auto_expand_max_capacity': '0.00MB'
+ }
+ obj = IBMSVCvolume()
+ data = obj.basic_checks(vdisk_data)
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_discover_site_from_pools(self, svc_authorize_mock, soi1, soi2):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': '1024'
+ })
+ soi1.return_value = {
+ "id": "2", "name": "Pool1", "status": "online", "mdisk_count": "1", "vdisk_count": "32",
+ "capacity": "553.00GB", "extent_size": "1024", "free_capacity": "472.00GB",
+ "virtual_capacity": "8.73GB", "used_capacity": "2.08GB", "real_capacity": "52.48GB", "overallocation": "1",
+ "warning": "80", "easy_tier": "auto", "easy_tier_status": "balanced",
+ "tiers": [
+ {"tier": "tier_scm", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier0_flash", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier1_flash", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier_enterprise", "tier_mdisk_count": "1", "tier_capacity": "553.00GB", "tier_free_capacity": "472.00GB"},
+ {"tier": "tier_nearline", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"}
+ ],
+ "compression_active": "yes", "compression_virtual_capacity": "200.00MB",
+ "compression_compressed_capacity": "0.31MB", "compression_uncompressed_capacity": "0.00MB", "site_id": "1",
+ "site_name": "site1", "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "Pool1", "child_mdisk_grp_count": "0",
+ "child_mdisk_grp_capacity": "0.00MB", "type": "parent", "encrypt": "no", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "data_reduction": "no", "used_capacity_before_reduction": "0.00MB", "used_capacity_after_reduction": "0.00MB",
+ "overhead_capacity": "0.00MB", "deduplication_capacity_saving": "0.00MB", "reclaimable_capacity": "0.00MB",
+ "physical_capacity": "553.00GB", "physical_free_capacity": "472.00GB", "shared_resources": "no", "vdisk_protection_enabled": "yes",
+ "vdisk_protection_status": "inactive", "easy_tier_fcm_over_allocation_max": "", "auto_expand": "no", "auto_expand_max_capacity": "0.00MB"
+ }
+ soi2.return_value = {
+ "id": "3", "name": "Pool2", "status": "online", "mdisk_count": "1", "vdisk_count": "29", "capacity": "2.72TB", "extent_size": "1024",
+ "free_capacity": "2.64TB", "virtual_capacity": "8.44GB", "used_capacity": "1.89GB", "real_capacity": "52.27GB", "overallocation": "0",
+ "warning": "80", "easy_tier": "auto", "easy_tier_status": "balanced",
+ "tiers": [
+ {"tier": "tier_scm", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier0_flash", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier1_flash", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier_enterprise", "tier_mdisk_count": "0", "tier_capacity": "0.00MB", "tier_free_capacity": "0.00MB"},
+ {"tier": "tier_nearline", "tier_mdisk_count": "1", "tier_capacity": "2.72TB", "tier_free_capacity": "2.64TB"}
+ ],
+ "compression_active": "no", "compression_virtual_capacity": "0.00MB", "compression_compressed_capacity": "0.00MB",
+ "compression_uncompressed_capacity": "0.00MB", "site_id": "2", "site_name": "site2", "parent_mdisk_grp_id": "3",
+ "parent_mdisk_grp_name": "Pool2", "child_mdisk_grp_count": "0", "child_mdisk_grp_capacity": "0.00MB", "type": "parent",
+ "encrypt": "no", "owner_type": "none", "owner_id": "", "owner_name": "", "data_reduction": "no", "used_capacity_before_reduction": "0.00MB",
+ "used_capacity_after_reduction": "0.00MB", "overhead_capacity": "0.00MB", "deduplication_capacity_saving": "0.00MB",
+ "reclaimable_capacity": "0.00MB", "physical_capacity": "2.72TB", "physical_free_capacity": "2.64TB", "shared_resources": "no",
+ "vdisk_protection_enabled": "yes", "vdisk_protection_status": "inactive", "easy_tier_fcm_over_allocation_max": "", "auto_expand": "no",
+ "auto_expand_max_capacity": "0.00MB"
+ }
+ obj = IBMSVCvolume()
+ obj.poolA_data = {
+ "site_name": "site2"
+ }
+ obj.poolB_data = {
+ "site_name": "site2"
+ }
+ data = obj.discover_site_from_pools()
+ self.assertEqual('site2', data[0])
+ self.assertEqual('site2', data[1])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vdisk_probe(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': '1024'
+ })
+ arg_data = [
+ {
+ 'id': '86', 'name': 'test_vol', 'IO_group_id': '0', 'IO_group_name': 'io_grp0',
+ 'status': 'online', 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'capacity': '1.00GB',
+ 'type': 'striped', 'formatted': 'no', 'formatting': 'yes', 'mdisk_id': '', 'mdisk_name': '',
+ 'FC_id': 'many', 'FC_name': 'many', 'RC_id': '86', 'RC_name': 'rcrel14',
+ 'vdisk_UID': '60050764008581864800000000000675', 'preferred_node_id': '1',
+ 'fast_write_state': 'not_empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2',
+ 'sync_rate': '50', 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '',
+ 'mirror_write_priority': 'latency', 'RC_change': 'no', 'compressed_copy_count': '0',
+ 'access_IO_group_count': '2', 'last_access_time': '', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'owner_type': 'none', 'owner_id': '', 'owner_name': '',
+ 'encrypt': 'no', 'volume_id': '86', 'volume_name': 'test_vol', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '',
+ 'volume_group_id': '', 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '',
+ 'cloud_account_name': '', 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none',
+ 'backup_grain_size': '', 'deduplicated_copy_count': '0', 'protocol': ''
+ },
+ {
+ 'copy_id': '0', 'status': 'online', 'sync': 'yes', 'auto_delete': 'no', 'primary': 'yes',
+ 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'type': 'striped', 'mdisk_id': '', 'mdisk_name': '',
+ 'fast_write_state': 'not_empty', 'used_capacity': '1.00GB', 'real_capacity': '1.00GB',
+ 'free_capacity': '0.00MB', 'overallocation': '100', 'autoexpand': '', 'warning': '', 'grainsize': '',
+ 'se_copy': 'no', 'easy_tier': 'on', 'easy_tier_status': 'balanced',
+ 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier0_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '1.00GB'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0.00MB'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '1.00GB', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'encrypt': 'no', 'deduplicated_copy': 'no', 'used_capacity_before_reduction': ''
+ }
+ ]
+ obj = IBMSVCvolume()
+ data = obj.vdisk_probe(arg_data)
+ self.assertEqual([], data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_volume_create(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': '1024'
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '86',
+ 'message': 'Volume, id [86], successfully created'
+ }
+ obj = IBMSVCvolume()
+ data = obj.volume_create()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vdisk_create(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': '1024'
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '86',
+ 'message': 'Volume, id [86], successfully created'
+ }
+ obj = IBMSVCvolume()
+ data = obj.vdisk_create()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_addvolumecopy(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2'
+ })
+ obj = IBMSVCvolume()
+ data = obj.addvolumecopy()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.discover_site_from_pools')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_addvdiskcopy(self, svc_authorize_mock, svc_run_command_mock, dsfp):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2'
+ })
+ dsfp.return_value = ('site1', 'site1')
+ svc_run_command_mock.return_value = None
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCvolume()
+ obj.addvdiskcopy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rmvolumecopy(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2'
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCvolume()
+ data = obj.rmvolumecopy()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vdisk_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2'
+ })
+ obj = IBMSVCvolume()
+ data = obj.vdisk_update([])
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_volume_delete(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2'
+ })
+ obj = IBMSVCvolume()
+ data = obj.volume_delete()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_discover_system_topology(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_vol',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2'
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '0000010021606192', 'name': 'altran-v7khs', 'location': 'local', 'partnership': '',
+ 'total_mdisk_capacity': '5.8TB', 'space_in_mdisk_grps': '5.8TB', 'space_allocated_to_vdisks': '134.76GB',
+ 'total_free_space': '5.6TB', 'total_vdiskcopy_capacity': '13.76GB', 'total_used_capacity': '30.11GB',
+ 'total_overallocation': '0', 'total_vdisk_capacity': '13.66GB', 'total_allocated_extent_capacity': '189.00GB',
+ 'statistics_status': 'on', 'statistics_frequency': '15', 'cluster_locale': 'en_US', 'time_zone': '522 UTC',
+ 'code_level': '8.4.0.0 (build 152.16.2009091545000)', 'console_IP': '9.71.42.198:443', 'id_alias': '0000010021606192',
+ 'gm_link_tolerance': '300', 'gm_inter_cluster_delay_simulation': '0', 'gm_intra_cluster_delay_simulation': '0',
+ 'gm_max_host_delay': '5', 'email_reply': '', 'email_contact': '', 'email_contact_primary': '',
+ 'email_contact_alternate': '', 'email_contact_location': '', 'email_contact2': '', 'email_contact2_primary': '',
+ 'email_contact2_alternate': '', 'email_state': 'stopped', 'inventory_mail_interval': '0', 'cluster_ntp_IP_address': '',
+ 'cluster_isns_IP_address': '', 'iscsi_auth_method': 'none', 'iscsi_chap_secret': '', 'auth_service_configured': 'no',
+ 'auth_service_enabled': 'no', 'auth_service_url': '', 'auth_service_user_name': '', 'auth_service_pwd_set': 'no',
+ 'auth_service_cert_set': 'no', 'auth_service_type': 'ldap', 'relationship_bandwidth_limit': '25',
+ 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'},
+ {'tier': 'tier0_flash', 'tier_capacity': '2.51TB', 'tier_free_capacity': '2.47TB'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0.00MB', 'tier_free_capacity': '0.00MB'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '553.00GB', 'tier_free_capacity': '474.00GB'},
+ {'tier': 'tier_nearline', 'tier_capacity': '2.72TB', 'tier_free_capacity': '2.65TB'}
+ ],
+ 'easy_tier_acceleration': 'off', 'has_nas_key': 'no', 'layer': 'storage', 'rc_buffer_size': '48',
+ 'compression_active': 'yes', 'compression_virtual_capacity': '200.00MB', 'compression_compressed_capacity': '0.31MB',
+ 'compression_uncompressed_capacity': '0.00MB', 'cache_prefetch': 'on', 'email_organization': '',
+ 'email_machine_address': '', 'email_machine_city': '', 'email_machine_state': 'XX', 'email_machine_zip': '',
+ 'email_machine_country': '', 'total_drive_raw_capacity': '10.10TB', 'compression_destage_mode': 'off',
+ 'local_fc_port_mask': '1111111111111111111111111111111111111111111111111111111111111111',
+ 'partner_fc_port_mask': '1111111111111111111111111111111111111111111111111111111111111111', 'high_temp_mode': 'off',
+ 'topology': 'hyperswap', 'topology_status': 'dual_site', 'rc_auth_method': 'none', 'vdisk_protection_time': '15',
+ 'vdisk_protection_enabled': 'no', 'product_name': 'IBM Storwize V7000', 'odx': 'off', 'max_replication_delay': '0',
+ 'partnership_exclusion_threshold': '315', 'gen1_compatibility_mode_enabled': 'no', 'ibm_customer': '',
+ 'ibm_component': '', 'ibm_country': '', 'tier_scm_compressed_data_used': '0.00MB',
+ 'tier0_flash_compressed_data_used': '0.00MB', 'tier1_flash_compressed_data_used': '0.00MB',
+ 'tier_enterprise_compressed_data_used': '36.00MB', 'tier_nearline_compressed_data_used': '0.00MB',
+ 'total_reclaimable_capacity': '0.00MB', 'physical_capacity': '5.77TB', 'physical_free_capacity': '5.58TB',
+ 'used_capacity_before_reduction': '0.00MB', 'used_capacity_after_reduction': '2.04GB', 'overhead_capacity': '26.00GB',
+ 'deduplication_capacity_saving': '0.00MB', 'enhanced_callhome': 'on', 'censor_callhome': 'off', 'host_unmap': 'off',
+ 'backend_unmap': 'on', 'quorum_mode': 'standard', 'quorum_site_id': '', 'quorum_site_name': '', 'quorum_lease': 'short',
+ 'parent_seq_no': '', 'automatic_vdisk_analysis_enabled': 'on'
+ }
+ obj = IBMSVCvolume()
+ data = obj.discover_system_topology()
+ self.assertEqual('hyperswap', data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.discover_system_topology')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.basic_checks')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hs_volume(self, svc_authorize_mock, bc, dst, gev, src):
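+        # Mock args map bottom-up to the stacked @patch decorators above:
+        # bc=basic_checks, dst=discover_system_topology, gev=get_existing_vdisk, src=svc_run_command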
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': 1024
+ })
+ gev.return_value = None
+ src.return_value = {
+ 'id': '86',
+ 'message': 'Volume, id [86], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCvolume()
+ data = obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.discover_system_topology')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.basic_checks')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hs_volume_thin(self, svc_authorize_mock, bc, dst, gev, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'poolA': 'Pool1',
+ 'poolB': 'Pool2',
+ 'size': 1024,
+ 'thin': True
+ })
+ gev.return_value = None
+ src.return_value = {
+ 'id': '86',
+ 'message': 'Volume, id [86], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCvolume()
+ data = obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.get_existing_vdisk')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.discover_system_topology')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_mirrored_volume.IBMSVCvolume.basic_checks')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_hs_volume(self, svc_authorize_mock, bc, dst, gev, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'type': 'local hyperswap',
+ 'name': 'test_volume',
+ 'state': 'absent'
+ })
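+        # get_existing_vdisk returns the volume record followed by its copy-0 record,
+        # so apply() sees an existing volume and proceeds to delete it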
+ gev.return_value = [
+ {
+ 'id': '130', 'name': 'test_volume', 'IO_group_id': '0', 'IO_group_name': 'io_grp0', 'status': 'online',
+ 'mdisk_grp_id': '2', 'mdisk_grp_name': 'Pool1', 'capacity': '1.00GB', 'type': 'striped', 'formatted': 'yes',
+ 'formatting': 'no', 'mdisk_id': '', 'mdisk_name': '', 'FC_id': 'many', 'FC_name': 'many', 'RC_id': '130',
+ 'RC_name': 'rcrel25', 'vdisk_UID': '600507640085818648000000000006A1', 'preferred_node_id': '2',
+ 'fast_write_state': 'empty', 'cache': 'readwrite', 'udid': '', 'fc_map_count': '2', 'sync_rate': '50',
+ 'copy_count': '1', 'se_copy_count': '0', 'filesystem': '', 'mirror_write_priority': 'latency',
+ 'RC_change': 'no', 'compressed_copy_count': '0', 'access_IO_group_count': '2', 'last_access_time': '',
+ 'parent_mdisk_grp_id': '2', 'parent_mdisk_grp_name': 'Pool1', 'owner_type': 'none', 'owner_id': '',
+ 'owner_name': '', 'encrypt': 'no', 'volume_id': '130', 'volume_name': 'test_volume', 'function': 'master',
+ 'throttle_id': '', 'throttle_name': '', 'IOPs_limit': '', 'bandwidth_limit_MB': '', 'volume_group_id': '',
+ 'volume_group_name': '', 'cloud_backup_enabled': 'no', 'cloud_account_id': '', 'cloud_account_name': '',
+ 'backup_status': 'off', 'last_backup_time': '', 'restore_status': 'none', 'backup_grain_size': '',
+ 'deduplicated_copy_count': '0', 'protocol': ''
+ },
+ {
+ 'copy_id': '0', 'status': 'online', 'sync': 'yes', 'auto_delete': 'no', 'primary': 'yes', 'mdisk_grp_id': '2',
+ 'mdisk_grp_name': 'Pool1', 'type': 'striped', 'mdisk_id': '', 'mdisk_name': '', 'fast_write_state': 'empty',
+ 'used_capacity': '1.00GB', 'real_capacity': '1.00GB', 'free_capacity': '0.00MB', 'overallocation': '100',
+ 'autoexpand': '', 'warning': '', 'grainsize': '', 'se_copy': 'no', 'easy_tier': 'on',
+ 'easy_tier_status': 'balanced', 'tiers': [
+ {'tier': 'tier_scm', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier0_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier1_flash', 'tier_capacity': '0.00MB'},
+ {'tier': 'tier_enterprise', 'tier_capacity': '1.00GB'},
+ {'tier': 'tier_nearline', 'tier_capacity': '0.00MB'}
+ ],
+ 'compressed_copy': 'no', 'uncompressed_used_capacity': '1.00GB', 'parent_mdisk_grp_id': '2',
+ 'parent_mdisk_grp_name': 'Pool1', 'encrypt': 'no', 'deduplicated_copy': 'no', 'used_capacity_before_reduction': ''
+ }
+ ]
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCvolume()
+ data = obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py
new file mode 100644
index 000000000..40d550878
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_ownershipgroup.py
@@ -0,0 +1,320 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_ownershipgroup """
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_ownershipgroup import \
+ IBMSVCOwnershipgroup
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCOwnershipgroup(unittest.TestCase):
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCOwnershipgroup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_fail_when_name_parameter_missing(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password'
+ })
+ IBMSVCOwnershipgroup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_fail_when_name_is_blank(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': ''
+ })
+ IBMSVCOwnershipgroup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_fail_when_state_parameter_missing(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'name': 'ansible_owshgroup',
+ 'username': 'username',
+ 'password': 'password'
+ })
+ IBMSVCOwnershipgroup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_fail_when_state_parameter_is_blank(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'name': 'ansible_owshgroup',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': ''
+ })
+ IBMSVCOwnershipgroup()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_ownershipgroup_with_keepobjects(self,
+ svc_authorize_mock,
+ check_existing_ownership_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup',
+ 'keepobjects': True
+ })
+
+ check_existing_ownership_mock.return_value = False
+
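+        # 'keepobjects' is only meaningful when deleting a group, so apply() should fail here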
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleFailJson) as exc:
+ ownership.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_ownershipgroup(self, svc_authorize_mock,
+ svc_run_mock,
+ check_existing_ownership_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup'
+ })
+ message = {
+ 'id': '0',
+ 'message': 'Ownership Group, id [0], successfully created'
+ }
+ svc_run_mock.return_value = message
+ check_existing_ownership_mock.return_value = {}
+
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ownership.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_existing_owgroups(self,
+ svc_authorize_mock,
+ check_existing_ownership_mock):
+
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup'
+ })
+ return_val = {
+ 'id': '0',
+ 'name': 'ansible_owshgroup'
+ }
+ check_existing_ownership_mock.return_value = return_val
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ownership.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_owgrp_without_keepobjs(self,
+ svc_authorize_mock,
+ check_existing_ownership_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup'
+ })
+ check_existing_ownership_mock.return_value = True
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ownership.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ def test_delete_owgrp_with_keepobjs_scenario_1(self,
+ svc_run_command_mock,
+ check_existing_ownership_mock,
+ svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup',
+ 'keepobjects': True
+ })
+ check_existing_ownership_mock.return_value = True
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ownership.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_token_wrap')
+ def test_delete_owgrp_with_keepobjs_scenario_2(self,
+ svc_token_mock,
+ check_existing_ownership_mock,
+ svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup',
+ 'keepobjects': True
+ })
+
+ check_existing_ownership_mock.return_value = True
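+        # Simulate the CLI rejecting the delete while user groups still reference the ownership group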
+ svc_token_mock.return_value = {
+ 'err': True,
+ 'out': 'Ownership group associated with one or more usergroup'
+ }
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleFailJson) as exc:
+ ownership.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_ownershipgroup.IBMSVCOwnershipgroup.check_existing_owgroups')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ def test_delete_owgrp_non_existence(self,
+ svc_run_command_mock,
+ check_existing_owgroup_mock,
+ svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_owshgroup',
+ 'keepobjects': True
+ })
+ check_existing_owgroup_mock.return_value = False
+ ownership = IBMSVCOwnershipgroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ownership.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_portset.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_portset.py
new file mode 100644
index 000000000..702815f03
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_portset.py
@@ -0,0 +1,415 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_portset """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_portset import IBMSVCPortset
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCPortset(unittest.TestCase):
+ """
+    Group of related unit tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': '',
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCPortset()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_mutually_exclusive_case(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'ownershipgroup': 'new_owner',
+ 'noownershipgroup': True,
+ 'state': 'present'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCPortset()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_fc_portset_with_replication_type_params(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'porttype': 'fc',
+ 'portset_type': 'replication',
+ 'state': 'present'
+ })
+
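+        # An empty result means the portset does not exist yet, so apply() should create it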
+ portset_exist_mock.return_value = {}
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_portset_without_optional_params(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'state': 'present'
+ })
+
+ portset_exist_mock.return_value = {}
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_portset_with_optional_params(self, svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'ownershipgroup': 'new_owner',
+ 'portset_type': 'replication',
+ 'state': 'present'
+ })
+
+ portset_exist_mock.return_value = {}
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_fc_portset_with_optional_params(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'porttype': 'fc',
+ 'ownershipgroup': 'new_owner',
+ 'portset_type': 'host',
+ 'state': 'present'
+ })
+
+ portset_exist_mock.return_value = {}
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_portset_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'ownershipgroup': 'new_owner',
+ 'portset_type': 'host',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "4",
+ "name": "portset0",
+ "type": "host",
+ "port_count": "0",
+ "host_count": "0",
+ "lossless": "",
+ "owner_id": "0",
+ "owner_name": "new_owner"
+ }
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_portset(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'noownershipgroup': True,
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "id": "4",
+ "name": "portset0",
+ "type": "host",
+ "port_count": "0",
+ "host_count": "0",
+ "lossless": "",
+ "owner_id": "0",
+ "owner_name": "new_owner"
+ }
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_portset_rename(self, svc_authorize_mock,
+                            svc_run_command_mock,
+                            portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'new_name',
+ 'old_name': 'portset0',
+ 'state': 'present'
+ })
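+        # The patched is_portset_exists supplies the record for the old portset name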
+        portset_exist_mock.return_value = {
+ "id": "4",
+ "name": "portset0",
+ "type": "host",
+ "port_count": "0",
+ "host_count": "0",
+ "lossless": "",
+ "owner_id": "0",
+ "owner_name": "new_owner"
+ }
+
+ arg_data = []
+ v = IBMSVCPortset()
+ data = v.portset_rename(arg_data)
+ self.assertEqual(data, 'Portset [portset0] has been successfully rename to [new_name].')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_portset_with_extra_param(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'portset_type': 'host',
+ 'ownershipgroup': 'owner1',
+ 'state': 'absent'
+ })
+
+ portset_exist_mock.return_value = {
+ "id": "4",
+ "name": "portset0",
+ "type": "host",
+ "port_count": "0",
+ "host_count": "0",
+ "lossless": "",
+ "owner_id": "0",
+ "owner_name": "new_owner"
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCPortset()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_portset(self, svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'state': 'absent'
+ })
+
+ portset_exist_mock.return_value = {
+ "id": "4",
+ "name": "portset0",
+ "port_count": "0",
+ "host_count": "0",
+ "lossless": "",
+ "owner_id": "0",
+ "owner_name": "new_owner"
+ }
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_portset.IBMSVCPortset.is_portset_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_portset_idempotency(self, svc_authorize_mock,
+ svc_run_command_mock,
+ portset_exist_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'portset0',
+ 'state': 'absent'
+ })
+
+ portset_exist_mock.return_value = {}
+ p = IBMSVCPortset()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ p.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replication.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replication.py
new file mode 100644
index 000000000..8953ebd4b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replication.py
@@ -0,0 +1,856 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_replication """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_replication import IBMSVCManageReplication
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCManageReplication(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCManageReplication()
+        self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_existing_vdisk(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'metro',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
+ obj = IBMSVCManageReplication()
+ rc_data = obj.existing_vdisk('test_name')
+ self.assertEqual('test_name', rc_data['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_existing_rc(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'metro',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
+ obj = IBMSVCManageReplication()
+ rc_data = obj.existing_rc()
+ self.assertEqual('test_name', rc_data['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_cycleperiod_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'GMCV',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ 'cyclingperiod': 300
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCManageReplication()
+ return_data = obj.cycleperiod_update()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_cyclemode_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'GMCV',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ 'cyclingperiod': 300
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCManageReplication()
+ return_data = obj.cyclemode_update()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rcrelationship_probe_metro(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ arg_data = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master_1',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux_1',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group_1',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'global',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
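+        # arg_data reflects a 'global' relationship whose names differ from the module args,
+        # so the probe should flag consistgrp, master, aux and the switch to metro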
+ obj = IBMSVCManageReplication()
+ probe_return = obj.rcrelationship_probe(arg_data)
+ self.assertEqual('test_consistency_group', probe_return[0]['consistgrp'])
+ self.assertEqual('test_master', probe_return[0]['master'])
+ self.assertEqual('test_aux', probe_return[0]['aux'])
+ self.assertEqual(True, probe_return[0]['metro'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.cyclemode_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.cycleperiod_update')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rcrelationship_update(self, svc_authorize_mock, svc_run_command_mock, cp_u_mock, cm_u_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'global',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ 'cyclingperiod': 299
+ })
+ modify = {
+ 'consistgrp': 'test_consistency_group',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'metro': True
+ }
+ modifycv = {
+ 'masterchange': 'test_masterchange_volume',
+ 'nomasterchange': 'test_auxchange_volume',
+ 'cycleperiodseconds': 299,
+ 'cyclingmode': True,
+ }
+ svc_run_command_mock.return_value = None
+ cp_u_mock.return_value = None
+ cm_u_mock.return_value = None
+ obj = IBMSVCManageReplication()
+ update_return = obj.rcrelationship_update(modify, modifycv)
+ self.assertEqual(None, update_return)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'global',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {
+ 'message': 'RC Relationship, id [0], successfully created',
+ 'id': '0'
+ }
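+        # After mkrcrelationship succeeds, create() re-reads the relationship; mock that lookup below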
+ existing_rc_mock.return_value = {
+ 'id': '0',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '0',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'metro',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
+ obj = IBMSVCManageReplication()
+ create_return = obj.create()
+ self.assertEqual('test_name', create_return['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_with_invalid_copytype(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'wrong_input',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.create()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_for_failure(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.create()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_parameter_name_missing(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.create()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_parameter_master_missing(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'remotecluster': 'test_remotecluster',
+ 'name': 'test_name',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.create()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_parameter_aux_missing(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'remotecluster': 'test_remotecluster',
+ 'name': 'test_name',
+ 'master': 'test_master',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.create()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_parameter_remotecluster_missing(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.create()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ 'force': 'true'
+ })
+ svc_run_command_mock.return_value = ''
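+        # rmrcrelationship produces no output on success, hence the empty response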
+ obj = IBMSVCManageReplication()
+ delete_return = obj.delete()
+ self.assertEqual(None, delete_return)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_failure(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ 'force': 'true'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.delete()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_relationship(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock, create_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ existing_rc_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_non_existing_relationship(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ existing_rc_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_for_failure_with_activeactive(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ existing_rc_mock.return_value = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'activeactive',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
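+        # Relationships with copy_type 'activeactive' (HyperSwap) cannot be deleted here, so apply() must fail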
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_existing_relationship(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock, delete_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ })
+ existing_rc_mock.return_value = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'metro',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_existing_relationship(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock, create_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master',
+ 'aux': 'test_aux',
+ 'copytype': 'metro',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group',
+ })
+ existing_rc_mock.return_value = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'metro',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]['changed'])
+
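+    # Unlike the idempotent create case above, the arguments below
+    # (master/aux/copytype/consistgrp) deliberately differ from the mocked
+    # existing relationship, so apply() is expected to take the update path
+    # and report changed=True.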
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replication.IBMSVCManageReplication.existing_rc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_update_existing_relationship(self, svc_authorize_mock, svc_run_command_mock, existing_rc_mock, create_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'remotecluster': 'test_remotecluster',
+ 'master': 'test_master_1',
+ 'aux': 'test_aux_1',
+ 'copytype': 'global',
+ 'sync': 'true',
+ 'consistgrp': 'test_consistency_group_1',
+ })
+ existing_rc_mock.return_value = {
+ 'id': '157',
+ 'name': 'test_name',
+ 'master_cluster_id': '0000020321E04566',
+ 'master_cluster_name': 'FlashSystem V9000',
+ 'master_vdisk_id': '157',
+ 'master_vdisk_name': 'test_master',
+ 'aux_cluster_id': '0000020321E04566',
+ 'aux_cluster_name': 'FlashSystem V9000',
+ 'aux_vdisk_id': '161',
+ 'aux_vdisk_name': 'test_aux',
+ 'primary': 'aux',
+ 'consistency_group_id': '8',
+ 'consistency_group_name': 'test_consistency_group',
+ 'state': 'consistent_synchronized',
+ 'bg_copy_priority': '50',
+ 'progress': '',
+ 'freeze_time': '',
+ 'status': 'online',
+ 'sync': '',
+ 'copy_type': 'metro',
+ 'cycling_mode': '',
+ 'cycle_period_seconds': '300',
+ 'master_change_vdisk_id': '',
+ 'master_change_vdisk_name': '',
+ 'aux_change_vdisk_id': '',
+ 'aux_change_vdisk_name': ''
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCManageReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py
new file mode 100644
index 000000000..3933404d1
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_replicationgroup.py
@@ -0,0 +1,441 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_replication """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_replicationgroup import IBMSVCRCCG
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
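+# None of these tests talk to a live array: setUp() monkey-patches
+# AnsibleModule.exit_json/fail_json to raise AnsibleExitJson/AnsibleFailJson,
+# so each scenario runs the module code with mocked REST calls and asserts on
+# the captured 'changed'/'failed' payload.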
+class TestIBMSVCRCCG(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCRCCG()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_rccg(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "metro",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ obj = IBMSVCRCCG()
+ return_data = obj.get_existing_rccg()
+ self.assertEqual('test_name', return_data['name'])
+
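+    # rccg_probe appears to diff the requested settings against the existing
+    # group: 'metro' is requested while the mocked group reports copy_type
+    # 'global', so the probe result should flag a truthy 'metro' modification.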
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rccg_probe(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'metro',
+ })
+ arg_data = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "global",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ obj = IBMSVCRCCG()
+ return_data = obj.rccg_probe(arg_data)
+ self.assertIn('metro', return_data[0])
+ self.assertTrue(return_data[0]["metro"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rccg_probe_failure_when_invalid_input(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'invalid_input'
+ })
+ arg_data = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "global",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.rccg_probe(arg_data)
+ self.assertTrue(exc.value.args[0]["failed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rccg_create(self, svc_authorize_mock, get_existing_rccg_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'metro'
+ })
+ get_existing_rccg_mock.return_value = {}
+ svc_run_command_mock.return_value = {
+ 'message': 'RC Consistency Group, id [3], successfully created',
+ 'id': '3'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.rccg_create()
+ self.assertTrue(exc.value.args[0]['changed'])
+
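+    # A sketch of the expected call shape, assuming rccg_update(modify,
+    # modifycv) takes two dicts of pending changes -- ordinary properties
+    # (e.g. switching the copy type to global) and cycle-period settings --
+    # and returns None after issuing the change commands. The dicts below are
+    # representative sample payloads, not values derived from a probe.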
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rccg_update(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'metro',
+ 'cyclingperiod': 299
+ })
+ sample_modify = {
+ 'global': True
+ }
+ sample_modifycv = {
+ 'cycleperiodseconds': 300,
+ }
+ obj = IBMSVCRCCG()
+ return_data = obj.rccg_update(sample_modify, sample_modifycv)
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_rccg_delete(self, svc_authorize_mock, get_existing_rccg_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'metro'
+ })
+ get_existing_rccg_mock.return_value = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "metro",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ svc_run_command_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.rccg_delete()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_deletion(self, svc_authorize_mock, svc_run_command_mock, get_existing_rccg_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ })
+ get_existing_rccg_mock.return_value = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "metro",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ svc_run_command_mock.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_creation(self, svc_authorize_mock, svc_run_command_mock, get_existing_rccg_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present'
+ })
+ get_existing_rccg_mock.return_value = {}
+ svc_run_command_mock.return_value = {
+ 'message': 'RC Consistency Group, id [3], successfully created',
+ 'id': '3'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_updation(self, svc_authorize_mock, svc_run_command_mock, get_existing_rccg_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'global'
+ })
+ get_existing_rccg_mock.return_value = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "metro",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_existing(self, svc_authorize_mock, svc_run_command_mock, get_existing_rccg_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'present',
+ 'copytype': 'metro'
+ })
+ get_existing_rccg_mock.return_value = {
+ "id": "11",
+ "name": "test_name",
+ "master_cluster_id": "0000020321E04566",
+ "master_cluster_name": "test_remotecluster",
+ "aux_cluster_id": "0000020321E04566",
+ "aux_cluster_name": "test_remotecluster",
+ "primary": "",
+ "state": "empty",
+ "relationship_count": "0",
+ "freeze_time": "",
+ "status": "",
+ "sync": "",
+ "copy_type": "metro",
+ "cycling_mode": "",
+ "cycle_period_seconds": "0"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_replicationgroup.IBMSVCRCCG.get_existing_rccg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_non_existing(self, svc_authorize_mock, svc_run_command_mock, get_existing_rccg_mock):
+ set_module_args({
+ 'clustername': 'test_remotecluster',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'absent',
+ 'copytype': 'metro'
+ })
+ get_existing_rccg_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCRCCG()
+ obj.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py
new file mode 100644
index 000000000..e8c1d0d6d
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_safeguarded_policy.py
@@ -0,0 +1,324 @@
+# Copyright (C) 2022 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_safeguarded_policy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_safeguarded_policy import IBMSVCSafeguardedPolicy
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCSafeguardedPolicy(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_with_blank_values(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': ''
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSafeguardedPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_without_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '2'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSafeguardedPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_sg_policy(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'present'
+ })
+
+ sg_exists_mock.return_value = {}
+
+ sg = IBMSVCSafeguardedPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
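+    # Idempotency check: svc_obj_info returns a policy whose attributes match
+    # the requested ones, so apply() should exit with changed=False. Note the
+    # mocked backup_start_time carries trailing seconds ('210228180000'),
+    # which the module presumably normalizes against the '2102281800' input.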
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_sg_idempotency(self, svc_authorize_mock, svc_run_command_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'present'
+ })
+
+ svc_obj_info_mock.return_value = {
+ "policy_id": "3",
+ "policy_name": "sgpolicy0",
+ "schedule_id": "1",
+ "backup_unit": "day",
+ "backup_interval": "1",
+ "backup_start_time": "210228180000",
+ "retention_days": "10"
+ }
+
+ sg = IBMSVCSafeguardedPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
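+    # Deletion should not accept creation-time attributes: passing the
+    # backup*/retentiondays options together with state=absent is expected to
+    # fail argument validation, hence AnsibleFailJson from the constructor.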
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_sg_policy_failure(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'absent'
+ })
+
+ sg_exists_mock.return_value = {
+ "policy_id": "3",
+ "policy_name": "sgpolicy0",
+ "schedule_id": "1",
+ "backup_unit": "day",
+ "backup_interval": "1",
+ "backup_start_time": "210228180000",
+ "retention_days": "10"
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSafeguardedPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_sg_policy(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy0',
+ 'state': 'absent'
+ })
+
+ sg_exists_mock.return_value = {
+ "policy_id": "3",
+ "policy_name": "sgpolicy0",
+ "schedule_id": "1",
+ "backup_unit": "day",
+ "backup_interval": "1",
+ "backup_start_time": "210228180000",
+ "retention_days": "10"
+ }
+
+ sg = IBMSVCSafeguardedPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_sg_idempotency(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy0',
+ 'state': 'absent'
+ })
+
+ sg_exists_mock.return_value = {}
+
+ sg = IBMSVCSafeguardedPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_suspend_sg_failure(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'sgpolicy0',
+ 'backupunit': 'day',
+ 'backupinterval': '1',
+ 'backupstarttime': '2102281800',
+ 'retentiondays': '10',
+ 'state': 'suspend'
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSafeguardedPolicy()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_suspend_sg(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'suspend'
+ })
+
+ sg = IBMSVCSafeguardedPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_safeguarded_policy.IBMSVCSafeguardedPolicy.is_sg_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_resume_sg(self, svc_authorize_mock, svc_run_command_mock, sg_exists_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'resume'
+ })
+
+ sg = IBMSVCSafeguardedPolicy()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ sg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_sra.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_sra.py
new file mode 100644
index 000000000..129878ada
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_sra.py
@@ -0,0 +1,401 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sanjaikumaar M <sanjaikumaar.m@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_sra """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_sra import IBMSVCSupportRemoteAssistance
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCSupportRemoteAssistance(unittest.TestCase):
+ """
+ Group of related Unit Tests
+ """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def test_module_required_if_functionality(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_required_together_functionality(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': []
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_with_empty_list(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': [],
+ 'sra_ip': [],
+ 'sra_port': []
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_with_blank_value_in_list(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': [''],
+ 'sra_ip': [''],
+ 'sra_port': ['']
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_without_state_parameter(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'onsite'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ def test_module_onsite_with_unnecessary_args(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'onsite',
+ 'name': ['test_proxy']
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
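+    # name/sra_ip/sra_port appear to be parallel per-proxy lists; a length
+    # mismatch (one name, two IPs, no ports) should be rejected up front.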
+ def test_module_with_unequal_proxy_arguments(self):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': ['dummy_proxy'],
+ 'sra_ip': ['9.9.9.9', '9.9.9.9'],
+ 'sra_port': []
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_sra_onsite(self, svc_authorize_mock,
+ svc_run_command_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'onsite',
+ })
+
+ sra_enabled_mock.return_value = False
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.add_proxy_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_sra_remote_with_proxy(self, svc_authorize_mock,
+ svc_run_command_mock,
+ add_proxy_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': ['customer_proxy'],
+ 'sra_ip': ['10.10.10.10'],
+ 'sra_port': [8888]
+ })
+
+ sra_enabled_mock.return_value = False
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.remove_proxy_details')
+ def test_disable_sra_remote_negative(self, remove_proxy_mock,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'disabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': ['customer_proxy'],
+ 'sra_ip': ['10.10.10.10'],
+ 'sra_port': [8888]
+ })
+
+ sra_enabled_mock.return_value = True
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCSupportRemoteAssistance()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.remove_proxy_details')
+ def test_disable_sra_remote(self, remove_proxy_mock,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'disabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': ['customer_proxy']
+ })
+
+ sra_enabled_mock.return_value = True
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
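+    # Re-enabling is idempotent: the mocked system info already reports
+    # remote_support_enabled='yes', so apply() should exit with changed=False.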
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_sra_twice(self, svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'onsite',
+ })
+
+ svc_obj_info_mock.return_value = {'remote_support_enabled': 'yes'}
+ sra_enabled_mock.return_value = True
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_remote_support_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.add_proxy_details')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_sra(self, svc_authorize_mock,
+ add_proxy_mock,
+ remote_enabled_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'enabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'remote',
+ 'name': ['customer_proxy'],
+ 'sra_ip': ['10.10.10.10'],
+ 'sra_port': [8888]
+ })
+
+ sra_enabled_mock.return_value = True
+ remote_enabled_mock.return_value = False
+ add_proxy_mock.return_value = []
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_disable_sra(self, svc_authorize_mock,
+ svc_run_command_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'disabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'onsite',
+ })
+
+ sra_enabled_mock.return_value = True
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_sra.IBMSVCSupportRemoteAssistance.is_sra_enabled')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_disable_sra_twice(self, svc_authorize_mock,
+ svc_run_command_mock,
+ sra_enabled_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'disabled',
+ 'username': 'username',
+ 'password': 'password',
+ 'support': 'onsite',
+ })
+
+ sra_enabled_mock.return_value = False
+
+ sra_inst = IBMSVCSupportRemoteAssistance()
+ with pytest.raises(AnsibleExitJson) as exc:
+ sra_inst.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_user.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_user.py
new file mode 100644
index 000000000..5af6c46c1
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_user.py
@@ -0,0 +1,521 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_usergroup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_user import IBMSVCUser
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCUser(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCUser()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_user(self, mock_svc_authorize, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Service',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "3",
+ "name": "userx",
+ "password": "yes",
+ "ssh_key": "no",
+ "remote": "no",
+ "usergrp_id": "3",
+ "usergrp_name": "Service",
+ "owner_id": "",
+ "owner_name": "",
+ "locked": "no",
+ "locked_until": "",
+ "password_expiry_time": "",
+ "password_change_required": "no"
+ }
+ ug = IBMSVCUser()
+ data = ug.get_existing_user()
+ self.assertEqual(data["name"], "userx")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Service',
+ })
+ ug = IBMSVCUser()
+ data = ug.basic_checks()
+ self.assertEqual(data, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_user(self, mock_svc_authorize, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Service',
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '3',
+ 'message': 'User, id [3], successfully created'
+ }
+ ug = IBMSVCUser()
+ data = ug.create_user()
+ self.assertEqual(data, None)
+
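+    # probe_user appears to return only the drift between desired and
+    # existing state: the user currently belongs to 'Service' while the task
+    # asks for 'Monitor', so the probe result should carry usergrp='Monitor'.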
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_probe_user(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Monitor',
+ })
+ data = {
+ "id": "3",
+ "name": "userx",
+ "password": "yes",
+ "ssh_key": "no",
+ "remote": "no",
+ "usergrp_id": "3",
+ "usergrp_name": "Service",
+ "owner_id": "",
+ "owner_name": "",
+ "locked": "no",
+ "locked_until": "",
+ "password_expiry_time": "",
+ "password_change_required": "no"
+ }
+ ug = IBMSVCUser()
+ result = ug.probe_user(data)
+ self.assertEqual(result['usergrp'], 'Monitor')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_user(self, mock_svc_authorize, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Monitor',
+ })
+ data = {
+ 'usergrp': 'Monitor',
+ 'password': 'Test@123'
+ }
+ svc_run_command_mock.return_value = None
+ ug = IBMSVCUser()
+ result = ug.update_user(data)
+ self.assertEqual(result, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_user(self, mock_svc_authorize, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'absent',
+ 'name': 'userx',
+ })
+ svc_run_command_mock.return_value = None
+ ug = IBMSVCUser()
+ result = ug.remove_user()
+ self.assertEqual(result, None)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_creating_new_user(self, mock_svc_authorize, mock_soi, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+        'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'userx@123',
+ 'usergroup': 'Monitor'
+ })
+ mock_soi.return_value = {}
+ mock_src.return_value = {
+ 'id': '3',
+ 'message': 'User, id [3], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_creating_existing_user(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'usergroup': 'Service'
+ })
+ mock_soi.return_value = {
+ "id": "3",
+ "name": "userx",
+ "password": "yes",
+ "ssh_key": "no",
+ "remote": "no",
+ "usergrp_id": "3",
+ "usergrp_name": "Service",
+ "owner_id": "",
+ "owner_name": "",
+ "locked": "no",
+ "locked_until": "",
+ "password_expiry_time": "",
+ "password_change_required": "no"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_updating_user(self, mock_svc_authorize, mock_soi, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'usergroup': 'Monitor',
+ 'lock': True,
+ 'forcepasswordchange': True
+ })
+ mock_soi.return_value = {
+ "id": "3",
+ "name": "userx",
+ "password": "yes",
+ "ssh_key": "no",
+ "remote": "no",
+ "usergrp_id": "3",
+ "usergrp_name": "Service",
+ "owner_id": "",
+ "owner_name": "",
+ "locked": "no",
+ "locked_until": "",
+ "password_expiry_time": "",
+ "password_change_required": "no"
+ }
+ mock_src.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_removing_existing_user(self, mock_svc_authorize, mock_soi, mock_src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'absent',
+ 'name': 'userx',
+ })
+ mock_soi.return_value = {
+ "id": "3",
+ "name": "userx",
+ "password": "yes",
+ "ssh_key": "no",
+ "remote": "no",
+ "usergrp_id": "3",
+ "usergrp_name": "Service",
+ "owner_id": "",
+ "owner_name": "",
+ "locked": "no",
+ "locked_until": "",
+ "password_expiry_time": "",
+ "password_change_required": "no"
+ }
+ mock_src.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_removing_non_existing_user(self, mock_svc_authorize, mock_soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'absent',
+ 'name': 'userx',
+ })
+ mock_soi.return_value = {}
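+ # There is no matching user to remove, so apply() should report changed=False.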
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_missing_mandatory_parameters(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_missing_parameter_during_creation(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'userx',
+ 'state': 'present',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mutually_exclusive_nopassword(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'nopassword': True,
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Service',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mutually_exclusive_nokey(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'keyfile': 'keyfile-path',
+ 'nokey': True,
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Service',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mutually_exclusive_lock_unlock(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'present',
+ 'name': 'userx',
+ 'user_password': 'Test@123',
+ 'keyfile': 'keyfile-path',
+ 'auth_type': 'usergrp',
+ 'usergroup': 'Service',
+ 'lock': True,
+ 'unlock': True
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUser()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py
new file mode 100644
index 000000000..a05a6557c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_usergroup.py
@@ -0,0 +1,448 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_usergroup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_usergroup import IBMSVCUsergroup
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCUsergroup(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCUsergroup()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_basic_checks(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ ug = IBMSVCUsergroup()
+ data = ug.basic_checks()
+ self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_usergroup(self, mock_svc_authorize, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "8",
+ "name": "test_usergrp",
+ "role": "Monitor",
+ "remote": "no",
+ "owner_id": "1",
+ "owner_name": "ownershipgroupx"
+ }
+ ug = IBMSVCUsergroup()
+ data = ug.get_existing_usergroup()
+ self.assertEqual(data["name"], "test_usergrp")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_user_group(self, mock_svc_authorize, svc_run_command):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ svc_run_command.return_value = {
+ "message": "User Group, id [6], successfully created",
+ "id": 6
+ }
+ ug = IBMSVCUsergroup()
+ data = ug.create_user_group()
+ self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_probe_user_group(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ data = {
+ "id": "8",
+ "name": "test_usergrp",
+ "role": "Service",
+ "remote": "no",
+ "owner_id": "1",
+ "owner_name": "ownershipgroupy"
+ }
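+ # Both role and owner in the mocked group differ from the requested values,
+ # so probe_user_group should flag 'role' and 'ownershipgroup' for update.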
+ ug = IBMSVCUsergroup()
+ data = ug.probe_user_group(data)
+ self.assertTrue('role' in data)
+ self.assertTrue('ownershipgroup' in data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_user_group(self, mock_svc_authorize, svc_run_command):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ data = {
+ "role": "Service",
+ "ownershipgroup": "ownershipgroupy"
+ }
+ ug = IBMSVCUsergroup()
+ data = ug.update_user_group(data)
+ self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_user_group(self, mock_svc_authorize, svc_run_command):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'absent',
+ })
+ svc_run_command.return_value = None
+ ug = IBMSVCUsergroup()
+ data = ug.remove_user_group()
+ self.assertIsNone(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_new_usergroup(self, mock_svc_authorize, soi, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ soi.return_value = {}
+ src.return_value = {
+ "message": "User Group, id [6], successfully created",
+ "id": 6
+ }
+ ug = IBMSVCUsergroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_existing_usergroup(self, mock_svc_authorize, soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Monitor',
+ 'ownershipgroup': 'ownershipgroupx'
+ })
+ soi.return_value = {
+ "id": "8",
+ "name": "test_usergrp",
+ "role": "Monitor",
+ "remote": "no",
+ "owner_id": "1",
+ "owner_name": "ownershipgroupx"
+ }
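+ # The mocked group already has the requested role and owner, so no change
+ # is expected.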
+ ug = IBMSVCUsergroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_existing_usergroup(self, mock_svc_authorize, soi, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Service',
+ 'ownershipgroup': 'ownershipgroupy'
+ })
+ soi.return_value = {
+ "id": "8",
+ "name": "test_usergrp",
+ "role": "Monitor",
+ "remote": "no",
+ "owner_id": "1",
+ "owner_name": "ownershipgroupx"
+ }
+ src.return_value = None
+ ug = IBMSVCUsergroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_noownershipgroup(self, mock_svc_authorize, soi, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'role': 'Service',
+ 'noownershipgroup': True
+ })
+ soi.return_value = {
+ "id": "8",
+ "name": "test_usergrp",
+ "role": "Monitor",
+ "remote": "no",
+ "owner_id": "1",
+ "owner_name": "ownershipgroupx"
+ }
+ src.return_value = None
+ ug = IBMSVCUsergroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_existing_usergroup(self, mock_svc_authorize, soi, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'absent',
+ })
+ soi.return_value = {
+ "id": "8",
+ "name": "test_usergrp",
+ "role": "Monitor",
+ "remote": "no",
+ "owner_id": "1",
+ "owner_name": "ownershipgroupx"
+ }
+ src.return_value = None
+ ug = IBMSVCUsergroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_non_existing_usergroup(self, mock_svc_authorize, soi):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'absent',
+ })
+ soi.return_value = {}
+ ug = IBMSVCUsergroup()
+ with pytest.raises(AnsibleExitJson) as exc:
+ ug.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_missing_name_parameter(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'state': 'absent',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUsergroup()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_missing_state_parameter(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUsergroup()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_missing_role_parameter_during_creation(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUsergroup()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mutually_exclusive_noownershipgroup(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_usergrp',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup-name',
+ 'noownershipgroup': True
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ ug = IBMSVCUsergroup()
+ ug.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volume.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volume.py
new file mode 100644
index 000000000..bfe1cfedd
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volume.py
@@ -0,0 +1,1731 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_volume """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_volume import IBMSVCvolume
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCvolume(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCvolume()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_assemble_iogrp(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1'
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ "id": "0", "name": "io_grp0", "node_count": "2", "vdisk_count": "4",
+ "host_count": "1", "site_id": "1", "site_name": "site1"
+ }, {
+ "id": "1", "name": "io_grp1", "node_count": "2", "vdisk_count": "0",
+ "host_count": "1", "site_id": "2", "site_name": "site2"
+ }, {
+ "id": "2", "name": "io_grp2", "node_count": "0", "vdisk_count": "0",
+ "host_count": "1", "site_id": "", "site_name": ""
+ }, {
+ "id": "3", "name": "io_grp3", "node_count": "0", "vdisk_count": "0",
+ "host_count": "1", "site_id": "", "site_name": ""
+ }, {
+ "id": "4", "name": "recovery_io_grp", "node_count": "0", "vdisk_count": "0",
+ "host_count": "0", "site_id": "", "site_name": ""
+ }
+ ]
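+ # assemble_iogrp should split the comma-separated iogrp string and
+ # validate each name against the mocked lsiogrp output above.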
+ v = IBMSVCvolume()
+ v.assemble_iogrp()
+ self.assertIsInstance(v.iogrp, list)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_with_empty_or_nonexisting_iogrp(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1, io_grp2, io_grp10'
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ "id": "0", "name": "io_grp0", "node_count": "2", "vdisk_count": "4",
+ "host_count": "1", "site_id": "1", "site_name": "site1"
+ }, {
+ "id": "1", "name": "io_grp1", "node_count": "2", "vdisk_count": "0",
+ "host_count": "1", "site_id": "2", "site_name": "site2"
+ }, {
+ "id": "2", "name": "io_grp2", "node_count": "0", "vdisk_count": "0",
+ "host_count": "1", "site_id": "", "site_name": ""
+ }, {
+ "id": "3", "name": "io_grp3", "node_count": "0", "vdisk_count": "0",
+ "host_count": "1", "site_id": "", "site_name": ""
+ }, {
+ "id": "4", "name": "recovery_io_grp", "node_count": "0", "vdisk_count": "0",
+ "host_count": "0", "site_id": "", "site_name": ""
+ }
+ ]
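+ # 'io_grp10' is not present in the mocked lsiogrp output, so the
+ # validation inside assemble_iogrp is expected to fail.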
+ v = IBMSVCvolume()
+ with pytest.raises(AnsibleFailJson) as exc:
+ v.assemble_iogrp()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mandatory_parameter_validation(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1'
+ })
+ v = IBMSVCvolume()
+ v.mandatory_parameter_validation()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_mandatory_parameter_are_missing(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.mandatory_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_both_parameter_volumegroup_and_novolumegroup_are_used(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup',
+ 'novolumegroup': True
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.mandatory_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_volume_creation_parameter_validation(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ v = IBMSVCvolume()
+ v.volume_creation_parameter_validation()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_volume_creation_parameter_are_missing(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.volume_creation_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_validate_volume_type(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ data = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ v = IBMSVCvolume()
+ v.validate_volume_type(data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_for_unsupported_volume_type(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ data = [
+ {
+ "id": "26", "name": "abc", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "26", "RC_name": "rcrel7", "vdisk_UID": "60050768108180ED700000000000002D",
+ "preferred_node_id": "2", "fast_write_state": "empty", "cache": "readwrite", "udid": "",
+ "fc_map_count": "0", "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "0",
+ "access_IO_group_count": "2", "last_access_time": "", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "", "owner_name": "", "encrypt": "no",
+ "volume_id": "26", "volume_name": "abc", "function": "master", "throttle_id": "", "throttle_name": "",
+ "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0", "volume_group_name": "test_volumegroup",
+ "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "", "backup_status": "off",
+ "last_backup_time": "", "restore_status": "none", "backup_grain_size": "", "deduplicated_copy_count": "0",
+ "protocol": "", "preferred_node_name": "node2", "safeguarded_expiration_time": "",
+ "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no",
+ "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824", "real_capacity": "1073741824",
+ "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "", "grainsize": "", "se_copy": "no",
+ "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
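+ # RC_id/RC_name are set and function is 'master', marking the mocked
+ # volume as part of a remote-copy relationship, which should be rejected
+ # as an unsupported volume type.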
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.validate_volume_type(data)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_volume(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ v = IBMSVCvolume()
+ data = v.get_existing_volume('test_volume')
+ self.assertEqual(data[0]['name'], 'test_volume')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_iogrp(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ "vdisk_id": "24",
+ "vdisk_name": "test_volume",
+ "IO_group_id": "0",
+ "IO_group_name": "io_grp0"
+ },
+ {
+ "vdisk_id": "24",
+ "vdisk_name": "test_volume",
+ "IO_group_id": "1",
+ "IO_group_name": "io_grp1"
+ }
+ ]
+ v = IBMSVCvolume()
+ data = v.get_existing_iogrp()
+ self.assertTrue('io_grp0' in data)
+ self.assertTrue('io_grp1' in data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volume(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ v = IBMSVCvolume()
+ v.create_volume()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_volume(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume'
+ })
+ svc_run_command_mock.return_value = None
+ v = IBMSVCvolume()
+ v.remove_volume()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_remove_volume_with_invalid_params(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup',
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_convert_to_bytes(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+
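+ # 1 gb should convert to 1024 ** 3 = 1073741824 bytes.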
+ v = IBMSVCvolume()
+ size_in_bytes = v.convert_to_bytes()
+ self.assertEqual(size_in_bytes, 1073741824)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_probe_volume(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_obj_info_mock.return_value = [
+ {
+ "vdisk_id": "24",
+ "vdisk_name": "test_volume",
+ "IO_group_id": "0",
+ "IO_group_name": "io_grp0"
+ },
+ {
+ "vdisk_id": "24",
+ "vdisk_name": "test_volume",
+ "IO_group_id": "1",
+ "IO_group_name": "io_grp1"
+ }
+ ]
+ data = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "100000000", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup2", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
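+ # The mocked volume differs from the request in size (capacity 100000000
+ # bytes vs 1 gb), I/O groups (two mapped vs io_grp0 only) and volume group
+ # (test_volumegroup2 vs test_volumegroup), so probe_volume should flag all three.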
+ v = IBMSVCvolume()
+ probe_data = v.probe_volume(data)
+ self.assertTrue('size' in probe_data)
+ self.assertTrue('iogrp' in probe_data)
+ self.assertTrue('volumegroup' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_expand_volume(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_run_command_mock.return_value = None
+ v = IBMSVCvolume()
+ v.expand_volume(973741824)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_shrink_volume(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_run_command_mock.return_value = None
+ v = IBMSVCvolume()
+ v.shrink_volume(973790)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_add_iogrp(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_run_command_mock.return_value = None
+ v = IBMSVCvolume()
+ v.add_iogrp(['io_grp1', 'io_grp2'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_remove_iogrp(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0',
+ 'volumegroup': 'test_volumegroup'
+ })
+ svc_run_command_mock.return_value = None
+ v = IBMSVCvolume()
+ v.remove_iogrp(['io_grp1'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_volume(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0',
+ 'volumegroup': 'test_volumegroup2'
+ })
+ modify = {
+ "iogrp": {
+ "remove": [
+ "io_grp1",
+ "io_grp0"
+ ]
+ },
+ "size": {
+ "expand": 973741824
+ },
+ "volumegroup": {
+ "name": "test_volumegroup2"
+ }
+ }
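+ # The probe result asks update_volume to remove two I/O groups, expand
+ # the volume and move it into test_volumegroup2.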
+ svc_run_command_mock.return_value = None
+ v = IBMSVCvolume()
+ v.update_volume(modify)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_with_missing_name_parameter(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'pool': 'test_pool',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0',
+ 'volumegroup': 'test_volumegroup'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_with_missing_pool_parameter_while_creating_volume(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'size': '1',
+ 'unit': 'gb',
+ 'iogrp': 'io_grp0',
+ 'volumegroup': 'test_volumegroup'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_creating_new_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'test_pool',
+ 'iogrp': 'io_grp0, io_grp1',
+ })
+ c3.return_value = []
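+ # get_existing_volume is mocked to return [] (volume absent), so apply()
+ # should create the volume and report a change.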
+ c4.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_creating_an_existing_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'site1pool1',
+ })
+ c3.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ c4.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_deleting_an_existing_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'absent'
+ })
+ c3.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ c4.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_deleting_non_existing_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'absent',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'test_pool',
+ })
+ c3.return_value = []
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_creating_thin_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'test_pool',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'thin': True,
+ 'buffersize': '10%'
+ })
+ c3.return_value = []
+ c4.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_creating_compressed_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'test_pool',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'compressed': True,
+ 'buffersize': '10%'
+ })
+ c3.return_value = []
+ c4.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_creating_deduplicated_volume(self, auth_mock, c1, c2, c3, c4):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'test_pool',
+ 'iogrp': 'io_grp0, io_grp1',
+ 'deduplicated': True,
+ })
+ c3.return_value = []
+ c4.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_while_updating_pool_parameter(self, auth_mock, c1, c2, c3):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_thin',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'new_pool_name',
+ 'thin': True,
+ 'buffersize': '2%'
+ })
+ c3.return_value = [
+ {
+ "id": "77", "name": "test_thin", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "0", "mdisk_grp_name": "site2pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "no", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050764008881864800000000000471", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "1", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "1", "last_access_time": "",
+ "parent_mdisk_grp_id": "0", "parent_mdisk_grp_name": "site2pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "77", "volume_name": "test_thin", "function": "",
+ "throttle_id": "", "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "",
+ "volume_group_name": "", "cloud_backup_enabled": "no", "cloud_account_id": "", "cloud_account_name": "",
+ "backup_status": "off", "last_backup_time": "", "restore_status": "none", "backup_grain_size": "",
+ "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "0",
+ "mdisk_grp_name": "site2pool1", "type": "striped", "mdisk_id": "", "mdisk_name": "",
+ "fast_write_state": "empty", "used_capacity": "786432", "real_capacity": "38252032",
+ "free_capacity": "37465600", "overallocation": "2807", "autoexpand": "on", "warning": "80",
+ "grainsize": "256", "se_copy": "yes", "easy_tier": "on", "easy_tier_status": "balanced",
+ "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "38252032"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "786432", "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "site2pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_while_updating_thin_parameter(self, auth_mock, c1, c2, c3):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'site2pool1',
+ 'thin': True,
+ 'buffersize': '2%'
+ })
+ c3.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_while_updating_compressed_parameter(self, auth_mock, c1, c2, c3):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'site2pool1',
+ 'compressed': True,
+ 'buffersize': '2%'
+ })
+ c3.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_while_updating_deduplicated_parameter(self, auth_mock, c1, c2, c3):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'size': '1',
+ 'unit': 'gb',
+ 'pool': 'site2pool1',
+ 'compressed': True,
+ 'deduplicated': True,
+ 'buffersize': '2%'
+ })
+ c3.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.get_existing_volume')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.assemble_iogrp')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.mandatory_parameter_validation')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_while_managing_mirrored_volume(self, auth_mock, c1, c2, c3):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_compress',
+ 'state': 'present',
+ 'size': '2',
+ 'unit': 'gb',
+ 'pool': 'site2pool1',
+ 'compressed': True,
+ 'deduplicated': True,
+ 'buffersize': '2%'
+ })
+ c3.return_value = [
+ {
+ "id": "78", "name": "test_compress", "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "status": "online", "mdisk_grp_id": "0", "mdisk_grp_name": "site2pool1", "capacity": "1073741824",
+ "type": "many", "formatted": "no", "formatting": "no", "mdisk_id": "", "mdisk_name": "",
+ "FC_id": "", "FC_name": "", "RC_id": "", "RC_name": "", "vdisk_UID": "60050764008881864800000000000472",
+ "preferred_node_id": "5", "fast_write_state": "empty", "cache": "readwrite", "udid": "",
+ "fc_map_count": "0", "sync_rate": "50", "copy_count": "1", "se_copy_count": "0", "filesystem": "",
+ "mirror_write_priority": "latency", "RC_change": "no", "compressed_copy_count": "1",
+ "access_IO_group_count": "1", "last_access_time": "", "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "site2pool1", "owner_type": "none", "owner_id": "", "owner_name": "",
+ "encrypt": "no", "volume_id": "78", "volume_name": "test_compress", "function": "",
+ "throttle_id": "", "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "",
+ "volume_group_id": "", "volume_group_name": "", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node2",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ },
+ {
+ "copy_id": "0", "status": "online", "sync": "yes", "auto_delete": "no", "primary": "yes",
+ "mdisk_grp_id": "0", "mdisk_grp_name": "site2pool1", "type": "striped", "mdisk_id": "",
+ "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "163840",
+ "real_capacity": "38252032", "free_capacity": "38088192", "overallocation": "2807",
+ "autoexpand": "on", "warning": "80", "grainsize": "", "se_copy": "no", "easy_tier": "on",
+ "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "38252032"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "yes", "uncompressed_used_capacity": "0", "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "site2pool1", "encrypt": "no", "deduplicated_copy": "yes",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "",
+ "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_volume_rename(self, auth, mock_old, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ })
+ arg_data = []
+ mock_old.return_value = [
+ {
+ "id": "24", "name": "test_volume", "IO_group_id": "0", "IO_group_name": "io_grp0", "status": "online",
+ "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1", "capacity": "1073741824", "type": "striped",
+ "formatted": "yes", "formatting": "no", "mdisk_id": "", "mdisk_name": "", "FC_id": "", "FC_name": "",
+ "RC_id": "", "RC_name": "", "vdisk_UID": "60050768108180ED700000000000002E", "preferred_node_id": "1",
+ "fast_write_state": "empty", "cache": "readwrite", "udid": "", "fc_map_count": "0", "sync_rate": "50",
+ "copy_count": "1", "se_copy_count": "0", "filesystem": "", "mirror_write_priority": "latency",
+ "RC_change": "no", "compressed_copy_count": "0", "access_IO_group_count": "2", "last_access_time": "",
+ "parent_mdisk_grp_id": "2", "parent_mdisk_grp_name": "site1pool1", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "encrypt": "no", "volume_id": "24", "volume_name": "test_volume", "function": "", "throttle_id": "",
+ "throttle_name": "", "IOPs_limit": "", "bandwidth_limit_MB": "", "volume_group_id": "0",
+ "volume_group_name": "test_volumegroup", "cloud_backup_enabled": "no", "cloud_account_id": "",
+ "cloud_account_name": "", "backup_status": "off", "last_backup_time": "", "restore_status": "none",
+ "backup_grain_size": "", "deduplicated_copy_count": "0", "protocol": "", "preferred_node_name": "node1",
+ "safeguarded_expiration_time": "", "safeguarded_backup_count": "0"
+ }, {
+ "copy_id": "0", "status": "online",
+ "sync": "yes", "auto_delete": "no", "primary": "yes", "mdisk_grp_id": "2", "mdisk_grp_name": "site1pool1",
+ "type": "striped", "mdisk_id": "", "mdisk_name": "", "fast_write_state": "empty", "used_capacity": "1073741824",
+ "real_capacity": "1073741824", "free_capacity": "0", "overallocation": "100", "autoexpand": "", "warning": "",
+ "grainsize": "", "se_copy": "no", "easy_tier": "on", "easy_tier_status": "balanced", "tiers": [
+ {"tier": "tier_scm", "tier_capacity": "0"},
+ {"tier": "tier0_flash", "tier_capacity": "1073741824"},
+ {"tier": "tier1_flash", "tier_capacity": "0"},
+ {"tier": "tier_enterprise", "tier_capacity": "0"},
+ {"tier": "tier_nearline", "tier_capacity": "0"}
+ ], "compressed_copy": "no", "uncompressed_used_capacity": "1073741824", "parent_mdisk_grp_id": "2",
+ "parent_mdisk_grp_name": "site1pool1", "encrypt": "no", "deduplicated_copy": "no",
+ "used_capacity_before_reduction": "", "safeguarded_mdisk_grp_id": "", "safeguarded_mdisk_grp_name": ""
+ }
+ ]
+ src.return_value = None
+ v = IBMSVCvolume()
+ data = v.volume_rename(arg_data)
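+        # The expected string matches the module's return message verbatim,
+        # including its original wording.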
+ self.assertEqual(data, 'Volume [name] has been successfully rename to [new_name]')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_volume_rename_failure_for_unsupported_param(self, am):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ 'thin': True
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_cloud_backup_validation(self, auth, obj_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'name',
+ 'enable_cloud_snapshot': True,
+ 'cloud_account_name': 'aws_acc',
+ 'state': 'present',
+ })
+
+ obj_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_cloud_backup(self, auth, obj_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'name',
+ 'enable_cloud_snapshot': True,
+ 'cloud_account_name': 'aws_acc',
+ 'state': 'present',
+ })
+
+ obj_mock.return_value = [{'name': 'name', 'cloud_backup_enabled': 'no', 'type': 'striped', 'RC_name': ''}, {}]
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_enable_cloud_backup_idempotency(self, auth, obj_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'name',
+ 'enable_cloud_snapshot': True,
+ 'cloud_account_name': 'aws_acc',
+ 'state': 'present',
+ })
+
+ obj_mock.return_value = [{'name': 'name', 'cloud_backup_enabled': 'yes', 'cloud_account_name': 'aws_acc', 'type': 'striped', 'RC_name': ''}, {}]
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_disable_cloud_backup(self, auth, obj_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'name',
+ 'enable_cloud_snapshot': False,
+ 'state': 'present',
+ })
+
+ obj_mock.return_value = [{'name': 'name', 'cloud_backup_enabled': 'yes', 'type': 'striped', 'RC_name': ''}, {}]
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_disable_cloud_backup_idempotency(self, auth, obj_mock, src):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'name',
+ 'enable_cloud_snapshot': False,
+ 'state': 'present',
+ })
+
+ obj_mock.return_value = [{'name': 'name', 'cloud_backup_enabled': 'no', 'type': 'striped', 'RC_name': ''}, {}]
+ with pytest.raises(AnsibleExitJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ # Create thinclone from volume
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.create_transient_snapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volume_thinclone(self, svc_authorize_mock, svc_run_command_mock, create_transient_snapshot_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'type': 'thinclone',
+ 'fromsourcevolume': 'vol1'
+ })
+
+ svc_run_command_mock.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
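+        # A thinclone is built from a transient snapshot of the source
+        # volume, so mocking the snapshot id lets create_volume() drive the
+        # whole flow against the mocked command response.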
+ create_transient_snapshot_mock.return_value = 10
+ v = IBMSVCvolume()
+ v.create_volume()
+
+ # Create clone from volume
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volume.IBMSVCvolume.create_transient_snapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volume_clone(self, svc_authorize_mock, svc_run_command_mock, create_transient_snapshot_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'pool': 'test_pool',
+ 'type': 'clone',
+ 'fromsourcevolume': 'vol1'
+ })
+
+ svc_run_command_mock.return_value = {
+ 'id': '25',
+ 'message': 'Volume, id [25], successfully created'
+ }
+ create_transient_snapshot_mock.return_value = 10
+ v = IBMSVCvolume()
+ v.create_volume()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_thinclone_creation_parameter_type_missing(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'fromsourcevolume': 'src_volume1',
+ 'pool': 'pool1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.volume_creation_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_thinclone_creation_parameter_fromsourcevolume_missing(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'type': 'thinclone',
+ 'pool': 'pool1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.volume_creation_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_thinclone_creation_parameter_pool_missing(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'type': 'thinclone',
+ 'fromsourcevolume': 'src_volume1',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.volume_creation_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_when_size_provided_in_thinclone_creation(self, svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'type': 'thinclone',
+ 'fromsourcevolume': 'src_volume1',
+ 'size': '2048',
+ 'pool': 'pool1'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.volume_creation_parameter_validation()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_volume_rename_failure_for_unsupported_param_type(self, am):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ 'type': 'thinclone'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ v = IBMSVCvolume()
+ v.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ # Test create_transient_snapshot
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_transient_snapshot(self, svc_authorize_mock, svc_run_cmd_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'new_name',
+ 'state': 'present',
+ 'type': 'thinclone',
+ 'fromsourcevolume': 'vol1'
+ })
+ svc_run_cmd_mock.return_value = {
+ "id": "3",
+ "message": "Snapshot, id [3], successfully created or triggered"
+ }
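+        # The helper should return the snapshot id from the mocked response
+        # as a string.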
+ v = IBMSVCvolume()
+ snapshot_id = v.create_transient_snapshot()
+ self.assertEqual(snapshot_id, '3')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py
new file mode 100644
index 000000000..cdfbfcb05
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_manage_volumegroup.py
@@ -0,0 +1,1638 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_volumegroup """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_manage_volumegroup import IBMSVCVG
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
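+    # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS when it is
+    # instantiated, so seeding it here simulates a playbook invocation.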
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCVG(unittest.TestCase):
+    """ A group of related unit tests for IBMSVCVG """
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCVG()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_vg(self, mock_svc_authorize, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ vg.get_existing_vg("test_volumegroup")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_probe_adding_ownershipgroup(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'ownershipgroup': 'test_ownershipgroup_new',
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
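+        # vg_probe reports only the properties that need updating; requesting
+        # an ownershipgroup on a group with no owner should surface it here.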
+ self.assertTrue('ownershipgroup' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_probe_updating_ownershipgroup(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'ownershipgroup': 'test_ownershipgroup_new',
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "test_ownershipgroup_old",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('ownershipgroup' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_probe_with_noownershipgroup(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'noownershipgroup': True
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "test_ownershipgroup",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('noownershipgroup' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_probe_add_safeguardpolicyname(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'safeguardpolicyname': 'policy_name'
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "test_ownershipgroup",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('safeguardedpolicy' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_probe_update_safeguardpolicyname(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'safeguardpolicyname': 'new_policy_name'
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "test_ownershipgroup",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "old_policy_name",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('safeguardedpolicy' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_failure_for_mutually_exclusive_parameter_1(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'ownershipgroup': 'test_ownershipgroup',
+ 'noownershipgroup': True
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg.vg_probe(data)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_failure_for_mutually_exclusive_parameter_2(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'safeguardpolicyname': 'policy_name',
+ 'nosafeguardpolicy': True
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg.vg_probe(data)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_failure_for_mutually_exclusive_parameter_3(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'ownershipgroup': 'test_ownershipgroup',
+ 'safeguardpolicyname': 'policy_name'
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ vg = IBMSVCVG()
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg.vg_probe(data)
+ self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_create(self, mock_svc_authorize, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'ownershipgroup': 'test_ownershipgroup',
+ })
+ svc_run_command_mock.return_value = {
+ 'id': '56',
+ 'message': 'success'
+ }
+ vg = IBMSVCVG()
+        vg.vg_create()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_update_with_noownershipgroup_nosafeguardpolicy(self,
+ mock_svc_authorize,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'noownershipgroup': True,
+ 'nosafeguardpolicy': True
+ })
+ probe_data = {
+ 'noownershipgroup': True,
+ 'nosafeguardpolicy': True
+ }
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+        vg.vg_update(probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_update_with_ownershipgroup_nosafeguardpolicy(self,
+ mock_svc_authorize,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'ownershipgroup': 'group_name',
+ 'nosafeguardpolicy': True
+ })
+ probe_data = {
+ 'ownershipgroup': 'group_name',
+ 'nosafeguardpolicy': True
+ }
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+        vg.vg_update(probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_update_with_safeguardpolicyname(self, mock_svc_authorize,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'safeguardpolicyname': 'policy_name'
+ })
+ probe_data = {
+ 'safeguardedpolicy': 'policy_name'
+ }
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+        vg.vg_update(probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_update_with_policystarttime(self, mock_svc_authorize,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'safeguardpolicyname': 'policy_name',
+ 'policystarttime': 'YYMMDDHHMM'
+ })
+ probe_data = {
+ 'safeguardedpolicy': 'policy_name',
+ 'policystarttime': 'YYMMDDHHMM'
+ }
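+        # 'YYMMDDHHMM' is only a format placeholder; svc_run_command is
+        # mocked, so the value is never validated as a real timestamp.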
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+        vg.vg_update(probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_update_with_only_noownershipgroup(self, mock_svc_authorize,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'noownershipgroup': True,
+ })
+ probe_data = {
+ 'noownershipgroup': True
+ }
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+        vg.vg_update(probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_update_with_only_nosafeguardpolicy(self, mock_svc_authorize,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volume',
+ 'state': 'present',
+ 'nosafeguardpolicy': True,
+ })
+ probe_data = {
+ 'nosafeguardpolicy': True,
+ }
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+        vg.vg_update(probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_delete(self, mock_svc_authorize, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'absent',
+ })
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+ vg.vg_delete()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_vg_delete_with_invalid_params(self, mock_svc_authorize):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'type': 'thinclone',
+ 'pool': 'pool0',
+ 'state': 'absent'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg = IBMSVCVG()
+ vg.vg_delete()
+        self.assertTrue(exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_vg_delete_evictvolumes(self, mock_svc_authorize, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'absent',
+ 'evictvolumes': True
+ })
+ svc_run_command_mock.return_value = None
+ vg = IBMSVCVG()
+ vg.vg_delete()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_creation_of_new_volumegroup(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'present',
+ 'ownershipgroup': 'ownershipgroup_name'
+ })
+ svc_obj_info_mock.return_value = []
+ svc_run_command_mock.return_value = {
+ 'id': 56,
+ 'message': 'success message'
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_module_for_creation_when_volumegroup_already_existing(
+ self,
+ mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock
+ ):
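+        """ creating a volume group that already exists is idempotent (changed=False) """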
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_module_while_updating_ownershipgroup(self, mock_svc_authorize,
+ soim, srcm):
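+        """ updating the ownershipgroup of an existing volume group reports changed=True """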
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'present',
+ 'ownershipgroup': 'new_name'
+ })
+ soim.return_value = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "old_name",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ srcm.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_deleting_an_existing_volumegroup(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
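+        """ deleting an existing volume group reports changed=True """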
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'absent',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_module_for_deleting_nonexisting_volumegroup(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
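+        """ deleting a non-existent volume group is idempotent (changed=False) """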
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'state': 'absent',
+ })
+ svc_obj_info_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_with_snapshotpolicy(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
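+        """ snapshot and replication policies can be attached while creating a volume group """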
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'snapshotpolicy': 'ss_policy1',
+ 'replicationpolicy': 'rp0',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_with_snapshotpolicy_idempotency(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'snapshotpolicy': 'ss_policy1',
+ 'replicationpolicy': 'rp0',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "ss_policy1",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no",
+ "replication_policy_name": "rp0"
+ }
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_with_safeguarded_snapshotpolicy(self,
+ mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'snapshotpolicy': 'ss_policy1',
+ 'safeguarded': True,
+ 'ignoreuserfcmaps': 'yes',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_snapshot_policy(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
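+        """ vg_probe detects snapshot and replication policy changes """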
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'snapshotpolicy': 'ss_policy2',
+ 'replicationpolicy': 'rp0',
+ 'state': 'present',
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "ss_policy1",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no",
+ "replication_policy_name": ""
+ }
+
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('snapshotpolicy' in probe_data)
+ self.assertTrue('replicationpolicy' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_safeguarded_snapshot_policy(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'snapshotpolicy': 'ss_policy2',
+ 'safeguarded': True,
+ 'ignoreuserfcmaps': 'yes',
+ 'state': 'present',
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "ss_policy1",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('safeguarded' in probe_data)
+ self.assertTrue('snapshotpolicy' in probe_data)
+ self.assertTrue('ignoreuserfcmaps' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_unmap_snapshot_policy(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
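+        """ vg_probe reports policy removal via nosnapshotpolicy and noreplicationpolicy """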
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'nosnapshotpolicy': True,
+ 'noreplicationpolicy': True,
+ 'state': 'present',
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "ss_policy2",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no",
+ "replication_policy_name": "rp0"
+ }
+
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('nosnapshotpolicy' in probe_data)
+ self.assertTrue('noreplicationpolicy' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_suspend_snapshot_policy_in_volumegroup(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'snapshotpolicysuspended': 'yes',
+ 'state': 'present',
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "ss_policy2",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no"
+ }
+
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('snapshotpolicysuspended' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_from_VG_snapshot(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'type': 'thinclone',
+ 'snapshot': 'snapshot1',
+ 'fromsourcegroup': 'volgrp1',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {}
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.set_parentuid')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_volumegroup_from_orphan_snapshot(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ set_parentuid_mock,
+ svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'type': 'thinclone',
+ 'snapshot': 'snapshot1',
+ 'state': 'present',
+ })
+ svc_obj_info_mock.return_value = {}
+ vg = IBMSVCVG()
+ vg.parentuid = 5
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_update_storage_partition(self, mock_svc_authorize,
+ svc_obj_info_mock,
+ svc_run_command_mock):
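+        """ vg_probe detects a storage partition change for an existing volume group """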
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_volumegroup',
+ 'partition': 'partition1',
+ 'state': 'present'
+ })
+ data = {
+ "id": "8",
+ "name": "test_volumegroup",
+ "volume_count": "0",
+ "backup_status": "empty",
+ "last_backup_time": "",
+ "owner_id": "",
+ "owner_name": "",
+ "safeguarded_policy_id": "",
+ "safeguarded_policy_name": "",
+ "safeguarded_policy_start_time": "",
+ "snapshot_policy_name": "",
+ "snapshot_policy_suspended": "no",
+ "ignore_user_flash_copy_maps": "no",
+ "snapshot_policy_safeguarded": "no",
+ "partition_name": ""
+ }
+
+ vg = IBMSVCVG()
+ probe_data = vg.vg_probe(data)
+ self.assertTrue('partition' in probe_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.create_transient_snapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.set_parentuid')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_vg_thinclone_from_source_volumes(self, svc_authorize_mock,
+ svc_run_cmd_mock,
+ svc_get_existing_vg_mock,
+ svc_parentuid_mock,
+ create_transient_snapshot_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'vg_thinclone2',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'thinclone',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {}
+ create_transient_snapshot_mock.return_value = 'snapshot_3335105753'
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_vg_thinclone_from_source_volumes_idempotency(self, svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1thclone',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'thinclone',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1thclone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': 'thinclone',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+    # Test that creating a normal volumegroup fails when an existing
+    # clone/thinclone has the same name but different source volumes
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_normal_vg_with_existing_thinclone_vg_name(self, svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1thclone',
+ 'state': 'present',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1thclone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': 'thinclone',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertEqual(exc.value.args[0]['msg'], 'Existing thinclone volumegroup found.')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_modify_vg_source_volumes(self,
+ svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1thclone',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v3:d1',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1thclone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': 'thinclone',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertEqual(exc.value.args[0]['msg'], 'Parameter [fromsourcevolumes] is invalid for modifying volumegroup.')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_update_thinclone_vg_pool(self,
+ svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1thclone',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'thinclone',
+ 'pool': 'pool1'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1thclone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': 'thinclone',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertEqual(exc.value.args[0]['msg'], 'Parameter [pool] is invalid for modifying volumegroup.')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_transient_snapshot(self,
+ svc_authorize_mock,
+ svc_run_cmd_mock):
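+        """ create_transient_snapshot returns a generated name with the snapshot_ prefix """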
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'vg_thinclone2',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'thinclone',
+ 'pool': 'pool0'
+ })
+
+ vg = IBMSVCVG()
+ snapshot_name = vg.create_transient_snapshot()
+ self.assertTrue('snapshot_' in snapshot_name)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.create_transient_snapshot')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.set_parentuid')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_vg_clone_from_source_volumes(self,
+ svc_authorize_mock,
+ svc_run_cmd_mock,
+ svc_get_existing_vg_mock,
+ svc_parentuid_mock,
+ create_transient_snapshot_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'vg_clone',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'clone',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {}
+ create_transient_snapshot_mock.return_value = 'snapshot_3335105753'
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_vg_clone_from_source_volumes_idempotency(self,
+ svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1clone',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'clone',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1clone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': '',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+
+    # Test that creating a clone with different source volumes fails when a cloned VG with the same name already exists
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_create_modify_clone_vg_source_volumes(self,
+ svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1clone',
+ 'state': 'present',
+ 'type': 'clone',
+ 'fromsourcevolumes': 'v3:d1',
+ 'pool': 'pool0'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1clone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': '',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertEqual(exc.value.args[0]['msg'], 'Parameter [fromsourcevolumes] is invalid for modifying volumegroup.')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_manage_volumegroup.IBMSVCVG.get_existing_vg')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_update_cloned_vg_pool(self,
+ svc_authorize_mock,
+ svc_get_existing_vg_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'v1d1thclone',
+ 'state': 'present',
+ 'fromsourcevolumes': 'v1:d1',
+ 'type': 'clone',
+ 'pool': 'pool1'
+ })
+ svc_get_existing_vg_mock.return_value = {
+ 'id': '0',
+ 'name': 'v1d1thclone',
+ 'volume_count': '2',
+ 'backup_status': 'off',
+ 'last_backup_time': '',
+ 'owner_id': '',
+ 'owner_name': '',
+ 'safeguarded_policy_id': '',
+ 'safeguarded_policy_name': '',
+ 'safeguarded_policy_start_time': '',
+ 'replication_policy_id': '',
+ 'replication_policy_name': '',
+ 'volume_group_type': '',
+ 'uid': '77',
+ 'source_volume_group_id': '',
+ 'source_volume_group_name': '',
+ 'parent_uid': '76',
+ 'source_snapshot_id': '0',
+ 'source_snapshot': 'snapshot_3335105753',
+ 'snapshot_count': '0',
+ 'protection_provisioned_capacity': '0.00MB',
+ 'protection_written_capacity': '0.00MB',
+ 'snapshot_policy_id': '',
+ 'snapshot_policy_name': '',
+ 'safeguarded_snapshot_count': '0',
+ 'ignore_user_flash_copy_maps': 'no',
+ 'partition_id': '',
+ 'partition_name': '',
+ 'restore_in_progress': 'no',
+ 'owner_type': 'none',
+ 'draft_partition_id': '',
+ 'draft_partition_name': '',
+ 'last_restore_time': '',
+ 'source_volumes_set': {'v1', 'd1'},
+ 'source_volumes_pool_set': {'pool0'}
+ }
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ vg = IBMSVCVG()
+ vg.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ self.assertEqual(exc.value.args[0]['msg'], 'Parameter [pool] is invalid for modifying volumegroup.')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdisk.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdisk.py
new file mode 100644
index 000000000..df083f6a8
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdisk.py
@@ -0,0 +1,439 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_mdisk """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_mdisk import IBMSVCmdisk
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCmdisk(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCmdisk()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_mdisk(self, svc_authorize_mock, svc_obj_info_mock):
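+        """ mdisk_exists returns the attributes of the matching mdisk """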
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_get_existing_mdisk',
+ 'mdiskgrp': 'Ansible-Pool'
+ })
+ mdisk_ret = [{"id": "0", "name": "mdisk_Ansible_collections",
+ "status": "online", "mode": "array", "mdisk_grp_id": "0",
+ "mdisk_grp_name": "Pool_Ansible_collections",
+ "capacity": "5.2TB", "ctrl_LUN_#": "",
+ "controller_name": "", "UID": "", "tier": "tier0_flash",
+ "encrypt": "no", "site_id": "", "site_name": "",
+ "distributed": "no", "dedupe": "no",
+ "over_provisioned": "no", "supports_unmap": "yes"}]
+ svc_obj_info_mock.return_value = mdisk_ret
+ mdisk = IBMSVCmdisk().mdisk_exists('test_get_existing_mdisk')
+ self.assertEqual('mdisk_Ansible_collections', mdisk['name'])
+ self.assertEqual('0', mdisk['id'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mdisk_create_get_existing_mdisk_called(self, svc_authorize_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_mdisk_create_get_existing_mdisk_called',
+ 'mdiskgrp': 'Pool'
+ })
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleExitJson) as exc:
+ mdisk_created.apply()
+ get_existing_mdisk_mock.assert_called_with("test_mdisk_create_get_existing_mdisk_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_mdisk_failed_since_missed_required_param(
+ self, svc_authorize_mock, get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_create_mdisk_failed_since_missed_required_param',
+ 'mdiskgrp': 'Pool'
+ })
+ get_existing_mdisk_mock.return_value = []
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleFailJson) as exc:
+ mdisk_created.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+ get_existing_mdisk_mock.assert_called_with("test_create_mdisk_failed_since_missed_required_param")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_mdisk_failed_incorrect_parameter(
+ self, svc_authorize_mock, get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_create_mdisk_failed_incorrect_parameter',
+ 'drivecount': '1',
+ 'mdiskgrp': 'Pool'
+ })
+ get_existing_mdisk_mock.return_value = []
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleFailJson) as exc:
+ mdisk_created.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+ get_existing_mdisk_mock.assert_called_with("test_create_mdisk_failed_incorrect_parameter")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_mdisk_but_mdisk_existed(self, svc_authorize_mock,
+ mdisk_probe_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_create_mdisk_but_mdisk_existed',
+ 'mdiskgrp': 'Pool'
+ })
+ mdisk_ret = [{"id": "0", "name": "mdisk_Ansible_collections",
+ "status": "online", "mode": "array", "mdisk_grp_id": "0",
+ "mdisk_grp_name": "Pool_Ansible_collections",
+ "capacity": "5.2TB", "ctrl_LUN_#": "",
+ "controller_name": "", "UID": "", "tier": "tier0_flash",
+ "encrypt": "no", "site_id": "", "site_name": "",
+ "distributed": "no", "dedupe": "no",
+ "over_provisioned": "no", "supports_unmap": "yes"}]
+ get_existing_mdisk_mock.return_value = mdisk_ret
+ mdisk_probe_mock.return_value = []
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleExitJson) as exc:
+ mdisk_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_mdisk_mock.assert_called_with("test_create_mdisk_but_mdisk_existed")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_mdisk_successfully(self, svc_authorize_mock,
+ mdisk_create_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_create_mdisk_successfully',
+ 'level': 'raid0',
+ 'drive': '5:6',
+ 'encrypt': 'no',
+ 'mdiskgrp': 'Pool'
+ })
+        mdisk = {u'message': u'Mdisk, id [0], '
+                             u'successfully created', u'id': u'0'}
+ mdisk_create_mock.return_value = mdisk
+ get_existing_mdisk_mock.return_value = []
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleExitJson) as exc:
+ mdisk_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_mdisk_mock.assert_called_with("test_create_mdisk_successfully")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_draid_successfully(self, svc_authorize_mock,
+ mdisk_create_mock,
+ get_existing_mdisk_mock):
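+        """ a distributed RAID (DRAID) mdisk can be created using driveclass, drivecount and stripewidth """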
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_create_mdisk_successfully',
+ 'level': 'raid0',
+ 'driveclass': '1',
+ 'drivecount': '2',
+ 'stripewidth': '2',
+ 'encrypt': 'no',
+ 'mdiskgrp': 'Pool'
+ })
+        mdisk = {u'message': u'Mdisk, id [0], '
+                             u'successfully created', u'id': u'0'}
+ mdisk_create_mock.return_value = mdisk
+ get_existing_mdisk_mock.return_value = []
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleExitJson) as exc:
+ mdisk_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_mdisk_mock.assert_called_with("test_create_mdisk_successfully")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_mdisk_failed_since_no_message_in_result(
+ self, svc_authorize_mock, svc_run_command_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_create_mdisk_successfully',
+ 'level': 'raid0',
+ 'drive': '5:6',
+ 'encrypt': 'no',
+ 'mdiskgrp': 'Pool'
+ })
+ mdisk = {u'id': u'0'}
+ svc_run_command_mock.return_value = mdisk
+ get_existing_mdisk_mock.return_value = []
+ mdisk_created = IBMSVCmdisk()
+ with pytest.raises(AnsibleFailJson) as exc:
+ mdisk_created.apply()
+ get_existing_mdisk_mock.assert_called_with("test_create_mdisk_successfully")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mdisk_rename_with_state_absent(self, mock_auth, mock_old, mock_cmd, get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'absent',
+ 'mdiskgrp': 'Pool'
+ })
+ mock_old.return_value = [
+ {
+ "id": "1", "name": "ansible_pool"
+ }
+ ]
+ get_existing_mdisk_mock.return_value = []
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCmdisk()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["failed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mdisk_rename(self, mock_auth, mock_old, mock_cmd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ 'mdiskgrp': 'Pool'
+ })
+ mock_old.return_value = [
+ {
+ "id": "1", "name": "ansible_pool"
+ }
+ ]
+ arg_data = []
+ mock_cmd.return_value = None
+ v = IBMSVCmdisk()
+ data = v.mdisk_rename(arg_data)
+        self.assertTrue(data, 'mdisk [name] has been successfully renamed to [new_name].')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_mdisk_but_mdisk_not_existed(self, svc_authorize_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_delete_mdisk_but_mdisk_not_existed',
+ 'mdiskgrp': 'Pool'
+ })
+ get_existing_mdisk_mock.return_value = []
+ mdisk_deleted = IBMSVCmdisk()
+ with pytest.raises(AnsibleExitJson) as exc:
+ mdisk_deleted.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_mdisk_mock.assert_called_with("test_delete_mdisk_but_mdisk_not_existed")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_mdisk_invalid_parameter(self, svc_authorize_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_delete_mdisk_invalid_parameter',
+ 'driveclass': '1',
+ 'mdiskgrp': 'Pool'
+ })
+ get_existing_mdisk_mock.return_value = []
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCmdisk()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["failed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdisk.IBMSVCmdisk.mdisk_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_mdisk_successfully(self, svc_authorize_mock,
+ mdisk_delete_mock,
+ get_existing_mdisk_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_delete_mdisk_successfully',
+ 'mdiskgrp': 'Pool'
+ })
+ mdisk_ret = [{"id": "0", "name": "mdisk_Ansible_collections",
+ "status": "online", "mode": "array", "mdisk_grp_id": "0",
+ "mdisk_grp_name": "Pool_Ansible_collections",
+ "capacity": "5.2TB", "ctrl_LUN_#": "",
+ "controller_name": "", "UID": "", "tier": "tier0_flash",
+ "encrypt": "no", "site_id": "", "site_name": "",
+ "distributed": "no", "dedupe": "no",
+ "over_provisioned": "no", "supports_unmap": "yes"}]
+ get_existing_mdisk_mock.return_value = mdisk_ret
+ mdisk_deleted = IBMSVCmdisk()
+ with pytest.raises(AnsibleExitJson) as exc:
+ mdisk_deleted.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_mdisk_mock.assert_called_with("test_delete_mdisk_successfully")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py
new file mode 100644
index 000000000..7a5da5f1b
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_mdiskgrp.py
@@ -0,0 +1,894 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_mdiskgrp """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_mdiskgrp import IBMSVCmdiskgrp
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCmdiskgrp(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCmdiskgrp()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_pool(self, svc_authorize_mock, svc_obj_info_mock):
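+        """ mdiskgrp_exists returns the attributes of the matching pool """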
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_get_existing_pool',
+ })
+ pool_ret = {"id": "0", "name": "Pool_Ansible_collections",
+ "status": "online", "mdisk_count": "1", "vdisk_count": "1",
+ "capacity": "5.23TB", "extent_size": "1024",
+ "free_capacity": "5.23TB", "virtual_capacity": "4.00GB",
+ "used_capacity": "4.00GB", "real_capacity": "4.00GB",
+ "overallocation": "0", "warning": "0", "easy_tier": "on",
+ "easy_tier_status": "balanced",
+ "compression_active": "no",
+ "compression_virtual_capacity": "0.00MB",
+ "compression_compressed_capacity": "0.00MB",
+ "compression_uncompressed_capacity": "0.00MB",
+ "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "Pool_Ansible_collections",
+ "child_mdisk_grp_count": "0",
+ "child_mdisk_grp_capacity": "0.00MB", "type": "parent",
+ "encrypt": "no", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "site_id": "", "site_name": "",
+ "data_reduction": "no",
+ "used_capacity_before_reduction": "0.00MB",
+ "used_capacity_after_reduction": "0.00MB",
+ "overhead_capacity": "0.00MB",
+ "deduplication_capacity_saving": "0.00MB",
+ "reclaimable_capacity": "0.00MB",
+ "easy_tier_fcm_over_allocation_max": "100%"}
+ svc_obj_info_mock.return_value = pool_ret
+ pool = IBMSVCmdiskgrp().mdiskgrp_exists('test_get_existing_pool')
+ self.assertEqual('Pool_Ansible_collections', pool['name'])
+ self.assertEqual('0', pool['id'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_get_existing_pool_called(self, svc_authorize_mock, get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ })
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_provisioning_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
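+        """ a new pool can be created with a provisioning policy attached """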
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {}
+ svc_run_command_mock.return_value = {
+ u'message': u'Storage pool, id [0], '
+ u'successfully created',
+ u'id': u'0'
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_provisioning_policy_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "provisioning_policy_name": "pp0"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_ownership_group(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'ownershipgroup': 'owner0',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {}
+ svc_run_command_mock.return_value = {
+ u'message': u'Storage pool, id [0], '
+ u'successfully created',
+ u'id': u'0'
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_ownership_group_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'ownershipgroup': 'owner0',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "owner_name": "owner0"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
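+    # replicationpoollinkuid without replication_partner_clusterid must fail module validation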
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_replicationpoollinkuid_failed(self,
+ svc_authorize_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'replicationpoollinkuid': '000000000000000100000123456789C4',
+ 'ext': True
+ })
+ message = 'Following parameters are required together: replicationpoollinkuid, replication_partner_clusterid'
+ with pytest.raises(AnsibleFailJson) as exc:
+ IBMSVCmdiskgrp()
+ self.assertTrue(exc.value.args[0]['failed'])
+ self.assertEqual(exc.value.args[0]['msg'], message)
+
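+    # Supplying both replication link parameters lets pool creation proceed (changed=True)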
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_replicationpoollinkuid(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'replicationpoollinkuid': '000000000000000100000123456789C4',
+ 'replication_partner_clusterid': 'x.x.x.x',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {}
+ svc_run_command_mock.return_value = {
+ u'message': u'Storage pool, id [0], '
+ u'successfully created',
+ u'id': u'0'
+ }
+ svc_obj_info_mock.return_value = {
+ "id": "000002022A104B10",
+ "partnership_index": "1"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_create_with_replicationpoollinkuid_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'replicationpoollinkuid': '000000000000000100000123456789C4',
+ 'replication_partner_clusterid': 'x.x.x.x',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {
+ "id": 0,
+ "name": "test_pool_create_get_existing_pool_called",
+ "replication_pool_link_uid": "000000000000000100000123456789C4",
+ "provisioning_policy_name": "pp0",
+ "replication_pool_linked_systems_mask": "0000000000000000000000000000000000000000000000000000000000000010"
+ }
+ svc_run_command_mock.return_value = {
+ u'message': u'Storage pool, id [0], '
+ u'successfully created',
+ u'id': u'0'
+ }
+ svc_obj_info_mock.return_value = {
+ "id": "000002022A104B10",
+ "partnership_index": "1"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_update_with_replicationpoollinkuid(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'replicationpoollinkuid': '000000000000000100000123456789C4',
+ 'replication_partner_clusterid': 'x.x.x.x',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {
+ 'id': 0,
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'replication_pool_link_uid': '000000000000000100000123456789C5',
+ 'replication_pool_linked_systems_mask': '0000000000000000000000000000000000000000000000000000000000000100',
+ 'provisioning_policy_name': ''
+ }
+ svc_run_command_mock.return_value = {
+ u'message': u'Storage pool, id [0], '
+ u'successfully created',
+ u'id': u'0'
+ }
+ svc_obj_info_mock.return_value = {
+ "id": "000002022A104B10",
+ "partnership_index": "1"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_update_with_replicationpoollinkuid_idempotency(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ svc_obj_info_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0',
+ 'replicationpoollinkuid': '000000000000000100000123456789C5',
+ 'replication_partner_clusterid': 'x.x.x.x',
+ 'ext': True
+ })
+ get_existing_pool_mock.return_value = {
+ 'id': 0,
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'replication_pool_link_uid': '000000000000000100000123456789C5',
+ 'replication_pool_linked_systems_mask': '0000000000000000000000000000000000000000000000000000000000000010',
+ 'provisioning_policy_name': 'pp0'
+ }
+ svc_run_command_mock.return_value = {
+ u'message': u'Storage pool, id [0], '
+ u'successfully created',
+ u'id': u'0'
+ }
+ svc_obj_info_mock.return_value = {
+ "id": "000002022A104B10",
+ "partnership_index": "1"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
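+    # Attaching a provisioning policy to a pool that has none is reported as a change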
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_update_with_provisioning_policy(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'provisioningpolicy': 'pp0'
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "provisioning_policy_name": ""
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
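+    # noprovisioningpolicy detaches an existing policy and is reported as a change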
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_discard_provisioning_policy_from_pool(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'noprovisioningpolicy': True
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "provisioning_policy_name": "pp0"
+ }
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_update_with_ownership_group(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'ownershipgroup': 'owner0'
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "owner_name": ""
+ }
+ pool_updated = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_updated.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_discard_ownership_group_from_pool(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'noownershipgroup': True
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "owner_name": "owner0"
+ }
+ pool_updated = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_updated.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
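+    # Setting a warning threshold on a pool that has none is reported as a change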
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_update_with_warning(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'warning': '1'
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "warning": ""
+ }
+ pool_updated = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_updated.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_pool_update_with_vdiskprotectionenabled(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_pool_create_get_existing_pool_called',
+ 'vdiskprotectionenabled': 'no'
+ })
+ get_existing_pool_mock.return_value = {
+ "id": "0",
+ "name": "test_pool_create_get_existing_pool_called",
+ "vdisk_protectionenabled": ""
+ }
+ pool_updated = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_updated.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("test_pool_create_get_existing_pool_called")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_pool_failed_since_missed_required_param(self,
+ svc_authorize_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_pool',
+ })
+ get_existing_pool_mock.return_value = []
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleFailJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['failed'])
+ get_existing_pool_mock.assert_called_with("ansible_pool")
+
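+    # Re-running create against an existing, matching pool is idempotent (changed=False)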
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_pool_but_pool_existed(self,
+ svc_authorize_mock,
+ pool_probe_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_pool',
+ })
+ pool_ret = {"id": "0", "name": "Pool_Ansible_collections",
+ "status": "online", "mdisk_count": "1",
+ "vdisk_count": "1",
+ "capacity": "5.23TB", "extent_size": "1024",
+ "free_capacity": "5.23TB", "virtual_capacity": "4.00GB",
+ "used_capacity": "4.00GB", "real_capacity": "4.00GB",
+ "overallocation": "0", "warning": "0", "easy_tier": "on",
+ "easy_tier_status": "balanced",
+ "compression_active": "no",
+ "compression_virtual_capacity": "0.00MB",
+ "compression_compressed_capacity": "0.00MB",
+ "compression_uncompressed_capacity": "0.00MB",
+ "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "Pool_Ansible_collections",
+ "child_mdisk_grp_count": "0",
+ "child_mdisk_grp_capacity": "0.00MB", "type": "parent",
+ "encrypt": "no", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "site_id": "", "site_name": "",
+ "data_reduction": "no",
+ "used_capacity_before_reduction": "0.00MB",
+ "used_capacity_after_reduction": "0.00MB",
+ "overhead_capacity": "0.00MB",
+ "deduplication_capacity_saving": "0.00MB",
+ "reclaimable_capacity": "0.00MB",
+ "easy_tier_fcm_over_allocation_max": "100%"
+ }
+ get_existing_pool_mock.return_value = pool_ret
+ pool_probe_mock.return_value = []
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("ansible_pool")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_pool_successfully(self,
+ svc_authorize_mock,
+ pool_create_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_pool',
+ 'datareduction': 'no',
+ 'easytier': 'auto',
+ 'encrypt': 'no',
+ 'ext': '1024',
+ })
+ pool = {u'message': u'Storage pool, id [0], '
+ u'successfully created', u'id': u'0'}
+ pool_create_mock.return_value = pool
+ get_existing_pool_mock.return_value = []
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("ansible_pool")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_pool_failed_since_no_message_in_result(self,
+ svc_authorize_mock,
+ svc_run_command_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_pool',
+ 'datareduction': 'no',
+ 'easytier': 'auto',
+ 'encrypt': 'no',
+ 'ext': '1024',
+ })
+ pool = {u'id': u'0'}
+ svc_run_command_mock.return_value = pool
+ get_existing_pool_mock.return_value = []
+ pool_created = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleFailJson) as exc:
+ pool_created.apply()
+ get_existing_pool_mock.assert_called_with("ansible_pool")
+
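+    # mdiskgrp_rename() is driven by old_name and returns a success message on rename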
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_mdiskgrp_rename(self, mock_auth, mock_old, mock_cmd):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'old_name': 'name',
+ 'name': 'new_name',
+ 'state': 'present',
+ })
+ mock_old.return_value = [
+ {
+ "id": "1", "name": "ansible_pool"
+ }
+ ]
+ arg_data = []
+ mock_cmd.return_value = None
+ v = IBMSVCmdiskgrp()
+ data = v.mdiskgrp_rename(arg_data)
+        self.assertTrue(data, 'mdiskgrp [name] has been successfully renamed to [new_name].')
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_pool_but_pool_not_existed(self,
+ svc_authorize_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_pool',
+ 'datareduction': 'no',
+ 'easytier': 'auto',
+ 'encrypt': 'no',
+ 'ext': '1024',
+ })
+ get_existing_pool_mock.return_value = []
+ pool_deleted = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_deleted.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("ansible_pool")
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_exists')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_mdiskgrp.IBMSVCmdiskgrp.mdiskgrp_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_pool_successfully(self,
+ svc_authorize_mock,
+ pool_delete_mock,
+ get_existing_pool_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'ansible_pool',
+ })
+ pool_ret = {"id": "0", "name": "Pool_Ansible_collections",
+ "status": "online", "mdisk_count": "1",
+ "vdisk_count": "1",
+ "capacity": "5.23TB", "extent_size": "1024",
+ "free_capacity": "5.23TB", "virtual_capacity": "4.00GB",
+ "used_capacity": "4.00GB", "real_capacity": "4.00GB",
+ "overallocation": "0", "warning": "0", "easy_tier": "on",
+ "easy_tier_status": "balanced",
+ "compression_active": "no",
+ "compression_virtual_capacity": "0.00MB",
+ "compression_compressed_capacity": "0.00MB",
+ "compression_uncompressed_capacity": "0.00MB",
+ "parent_mdisk_grp_id": "0",
+ "parent_mdisk_grp_name": "Pool_Ansible_collections",
+ "child_mdisk_grp_count": "0",
+ "child_mdisk_grp_capacity": "0.00MB", "type": "parent",
+ "encrypt": "no", "owner_type": "none", "owner_id": "",
+ "owner_name": "", "site_id": "", "site_name": "",
+ "data_reduction": "no",
+ "used_capacity_before_reduction": "0.00MB",
+ "used_capacity_after_reduction": "0.00MB",
+ "overhead_capacity": "0.00MB",
+ "deduplication_capacity_saving": "0.00MB",
+ "reclaimable_capacity": "0.00MB",
+ "easy_tier_fcm_over_allocation_max": "100%"}
+ get_existing_pool_mock.return_value = pool_ret
+ pool_deleted = IBMSVCmdiskgrp()
+ with pytest.raises(AnsibleExitJson) as exc:
+ pool_deleted.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_pool_mock.assert_called_with("ansible_pool")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py
new file mode 100644
index 000000000..ab79f8047
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_flashcopy.py
@@ -0,0 +1,482 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Sreshtant Bohidar <sreshtant.bohidar@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_start_stop_flashcopy """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_start_stop_flashcopy import IBMSVCFlashcopyStartStop
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCFlashcopyStartStop(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCFlashcopyStartStop()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
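+    # get_existing_fcmapping() returns the FlashCopy mapping that matches the module's 'name'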
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_fcmapping(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started'
+ })
+ svc_obj_info_mock.return_value = {
+ "id": "45", "name": "test_name", "source_vdisk_id": "320", "source_vdisk_name": "Ans_n7",
+ "target_vdisk_id": "323", "target_vdisk_name": "target_vdisk", "group_id": "1", "group_name": "test_group",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "0", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100", "clean_rate": "0",
+ "incremental": "off", "difference": "100", "grain_size": "256", "IO_group_id": "0",
+ "IO_group_name": "io_grp_name", "partner_FC_id": "43", "partner_FC_name": "test_fcmap",
+ "restoring": "no", "rc_controlled": "no", "keep_target": "no", "type": "generic",
+ "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ obj = IBMSVCFlashcopyStartStop()
+ data = obj.get_existing_fcmapping()
+ self.assertEqual('test_name', data['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_fcmapping_isgroup(self, svc_authorize_mock, svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_group',
+ 'state': 'started',
+ 'isgroup': True
+ })
+ svc_obj_info_mock.return_value = {
+ 'id': '4', 'name': 'test_group', 'status': 'stopped',
+ 'autodelete': 'off', 'start_time': '', 'owner_id': '0',
+ 'owner_name': 'test_ownershipgroup', 'FC_mapping_id': '39',
+ 'FC_mapping_name': 'test_mapping'
+ }
+ obj = IBMSVCFlashcopyStartStop()
+ data = obj.get_existing_fcmapping()
+ self.assertEqual('test_group', data['name'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_fc(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopyStartStop()
+ data = obj.start_fc()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_fc_isgroup(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'isgroup': True
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopyStartStop()
+ data = obj.start_fc()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop_fc(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopyStartStop()
+ data = obj.stop_fc()
+ self.assertEqual(None, data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop_fc_isgroup(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'isgroup': True
+ })
+ svc_run_command_mock.return_value = None
+ obj = IBMSVCFlashcopyStartStop()
+ data = obj.stop_fc()
+ self.assertEqual(None, data)
+
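+    # Starting an idle mapping invokes start_fc() and reports a change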
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.start_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_start_existing_fc_mapping(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started'
+ })
+ gef.return_value = {
+ "id": "39", "name": "test_mapping", "source_vdisk_id": "146",
+ "source_vdisk_name": "test_source", "target_vdisk_id": "324",
+ "target_vdisk_name": "test_target", "group_id": "", "group_name": "",
+ "status": "idle_or_copied", "progress": "0", "copy_rate": "41", "start_time": "",
+ "dependent_mappings": "0", "autodelete": "off", "clean_progress": "100",
+ "clean_rate": "50", "incremental": "off", "difference": "100", "grain_size": "256",
+ "IO_group_id": "0", "IO_group_name": "io_grp0", "partner_FC_id": "",
+ "partner_FC_name": "", "restoring": "no", "rc_controlled": "no", "keep_target": "no",
+ "type": "generic", "restore_progress": "0", "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.start_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_start_nonexisting_fc_mapping(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started'
+ })
+ gef.return_value = {}
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.start_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_start_existing_fc_consistgrp(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_group',
+ 'state': 'started',
+ 'isgroup': True
+ })
+ gef.return_value = {
+ 'id': '4', 'name': 'test_group', 'status': 'copying',
+ 'autodelete': 'off', 'start_time': '210112110946', 'owner_id': '0',
+ 'owner_name': 'test_ownershipgroup', 'FC_mapping_id': '39',
+ 'FC_mapping_name': 'test_mapping'
+ }
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.start_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_start_nonexisting_fc_consistgrp(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_group',
+ 'state': 'started',
+ 'isgroup': True
+ })
+ gef.return_value = {}
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.stop_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_existing_fc_mapping(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped'
+ })
+ gef.return_value = {
+ "id": "39", "name": "test_mapping", "source_vdisk_id": "146",
+ "source_vdisk_name": "test_source", "target_vdisk_id": "324",
+ "target_vdisk_name": "test_target", "group_id": "4",
+ "group_name": "test_group", "status": "copying", "progress": "0",
+ "copy_rate": "41", "start_time": "210112113610", "dependent_mappings": "0",
+ "autodelete": "off", "clean_progress": "100", "clean_rate": "50",
+ "incremental": "off", "difference": "100", "grain_size": "256",
+ "IO_group_id": "0", "IO_group_name": "io_grp0", "partner_FC_id": "",
+ "partner_FC_name": "", "restoring": "no", "rc_controlled": "no",
+ "keep_target": "no", "type": "generic", "restore_progress": "0",
+ "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.stop_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_existing_fc_mapping_with_force(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'force': True
+ })
+ gef.return_value = {
+ "id": "39", "name": "test_mapping", "source_vdisk_id": "146",
+ "source_vdisk_name": "test_source", "target_vdisk_id": "324",
+ "target_vdisk_name": "test_target", "group_id": "4",
+ "group_name": "test_group", "status": "copying", "progress": "0",
+ "copy_rate": "41", "start_time": "210112113610", "dependent_mappings": "0",
+ "autodelete": "off", "clean_progress": "100", "clean_rate": "50",
+ "incremental": "off", "difference": "100", "grain_size": "256",
+ "IO_group_id": "0", "IO_group_name": "io_grp0", "partner_FC_id": "",
+ "partner_FC_name": "", "restoring": "no", "rc_controlled": "no",
+ "keep_target": "no", "type": "generic", "restore_progress": "0",
+ "fc_controlled": "no", "owner_id": "", "owner_name": ""
+ }
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.stop_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_nonexisting_fc_mapping(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped'
+ })
+ gef.return_value = {}
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.stop_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_existing_fc_consistgrp(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped'
+ })
+ gef.return_value = {
+ 'id': '4', 'name': 'test_group', 'status': 'copying',
+ 'autodelete': 'off', 'start_time': '210112113610', 'owner_id': '0',
+ 'owner_name': 'test_ownershipgroup', 'FC_mapping_id': '39',
+ 'FC_mapping_name': 'test_mapping'
+ }
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.stop_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_existing_fc_consistgrp_with_force(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'force': True
+ })
+ gef.return_value = {
+ 'id': '4', 'name': 'test_group', 'status': 'copying',
+ 'autodelete': 'off', 'start_time': '210112113610', 'owner_id': '0',
+ 'owner_name': 'test_ownershipgroup', 'FC_mapping_id': '39',
+ 'FC_mapping_name': 'test_mapping'
+ }
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["changed"])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.stop_fc')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_flashcopy.IBMSVCFlashcopyStartStop.get_existing_fcmapping')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_nonexisting_fc_consistgrp(self, svc_authorize_mock, gef, sc):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped'
+ })
+ gef.return_value = {}
+ sc.return_value = None
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCFlashcopyStartStop()
+ obj.apply()
+ self.assertEqual(False, exc.value.args[0]["changed"])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py
new file mode 100644
index 000000000..d1d4b318c
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_start_stop_replication.py
@@ -0,0 +1,388 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s):
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_manage_replication """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_start_stop_replication import IBMSVCStartStopReplication
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCStartStopReplication(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCStartStopReplication()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
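+    # start() returns None after the start command is issued successfully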
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true'
+ })
+ svc_run_command_mock.return_value = ''
+ obj = IBMSVCStartStopReplication()
+ return_data = obj.start()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_when_isgroup(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true',
+ 'isgroup': 'true'
+ })
+ svc_run_command_mock.return_value = ''
+ obj = IBMSVCStartStopReplication()
+ return_data = obj.start()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_rcrelationship(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true',
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.start()
+ self.assertEqual('Failed to start the rcrelationship [test_name]', exc.value.args[0]['msg'])
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_starting_rcrelationship(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true',
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.start()
+ self.assertEqual('Failed to start the rcrelationship [test_name]', exc.value.args[0]['msg'])
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_starting_when_isgroup(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true',
+ 'isgroup': 'true'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.start()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'clean': 'true'
+ })
+ svc_run_command_mock.return_value = ''
+ obj = IBMSVCStartStopReplication()
+ return_data = obj.stop()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_stop_when_isgroup(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'isgroup': 'true'
+ })
+ svc_run_command_mock.return_value = ''
+ obj = IBMSVCStartStopReplication()
+ return_data = obj.stop()
+ self.assertEqual(None, return_data)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_stopping_rcrelationship(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.stop()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_failure_stopping_when_isgroup(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'isgroup': 'true'
+ })
+ svc_run_command_mock.return_value = {}
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.stop()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_for_failure_with_activeactive(self, svc_authorize_mock, svc_run_command_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true'
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['failed'])
+
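+    # With state=started, apply() delegates to start() and reports a change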
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_replication.IBMSVCStartStopReplication.start')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_remotecopy(self, svc_authorize_mock, svc_run_command_mock, start_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true'
+ })
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_replication.IBMSVCStartStopReplication.start')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_start_remotecopy_when_isgroup(self, svc_authorize_mock, svc_run_command_mock, start_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'started',
+ 'clean': 'true',
+ 'isgroup': 'true'
+ })
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_replication.IBMSVCStartStopReplication.stop')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_remotecopy(self, svc_authorize_mock, svc_run_command_mock, stop_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ })
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_replication.IBMSVCStartStopReplication.stop')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_stop_remotecopy_when_isgroup(self, svc_authorize_mock, svc_run_command_mock, stop_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'stopped',
+ 'clean': 'true',
+ 'isgroup': 'true'
+ })
+ with pytest.raises(AnsibleExitJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_start_stop_replication.IBMSVCStartStopReplication.stop')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_run_command')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+    def test_for_failure_with_unsupported_state(self, svc_authorize_mock, svc_run_command_mock, stop_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'username': 'username',
+ 'password': 'password',
+ 'name': 'test_name',
+ 'state': 'wrong_state',
+ 'clean': 'true',
+ })
+ with pytest.raises(AnsibleFailJson) as exc:
+ obj = IBMSVCStartStopReplication()
+ obj.apply()
+ self.assertEqual(True, exc.value.args[0]["failed"])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_vol_map.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_vol_map.py
new file mode 100644
index 000000000..31422c734
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svc_vol_map.py
@@ -0,0 +1,360 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Peng Wang <wangpww@cn.ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svc_vol_map """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.module_utils.ibm_svc_utils import IBMSVCRestApi
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svc_vol_map import IBMSVCvdiskhostmap
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
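+# A minimal usage sketch: serialize the arguments before constructing the
+# module object, e.g.
+#   set_module_args({'clustername': 'c', 'username': 'u', 'password': 'p'})
+#   module = IBMSVCvdiskhostmap()
+# AnsibleModule reads the serialized payload back from basic._ANSIBLE_ARGS.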
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
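+# With exit_json/fail_json patched to raise, each test drives the module via
+# pytest.raises(AnsibleExitJson/AnsibleFailJson) and inspects exc.value.args[0],
+# the kwargs dict the module would otherwise have returned to Ansible.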
+
+class TestIBMSVCvdiskhostmap(unittest.TestCase):
+ """ a group of related Unit Tests"""
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def setUp(self, connect):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
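+        # patch.multiple swaps in the raising stand-ins defined above;
+        # addCleanup restores the real exit_json/fail_json even on test failure.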
+ self.restapi = IBMSVCRestApi(self.mock_module_helper, '1.2.3.4',
+ 'domain.ibm.com', 'username', 'password',
+ False, 'test.log', '')
+
+ def set_default_args(self):
+ return dict({
+ 'name': 'test',
+ 'state': 'present'
+ })
+
+ def test_module_fail_when_required_args_missing(self):
+ """ required arguments are reported as errors """
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({})
+ IBMSVCvdiskhostmap()
+ print('Info: %s' % exc.value.args[0]['msg'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi.svc_obj_info')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_get_existing_vdiskhostmap(self, svc_authorize_mock,
+ svc_obj_info_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'host': 'host4test',
+ })
+ mapping_ret = [{"id": "0", "name": "volume_Ansible_collections",
+ "SCSI_id": "0", "host_id": "14",
+ "host_name": "host_ansible_collects",
+ "vdisk_UID": "6005076810CA0166C00000000000019F",
+ "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "mapping_type": "private", "host_cluster_id": "",
+ "host_cluster_name": "", "protocol": "scsi"}]
+ svc_obj_info_mock.return_value = mapping_ret
+ host_mapping_data = IBMSVCvdiskhostmap().get_existing_vdiskhostmap()
+ for host_mapping in host_mapping_data:
+ self.assertEqual('volume_Ansible_collections', host_mapping['name'])
+ self.assertEqual('0', host_mapping['id'])
+
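+    # An empty result from vdiskhostmap_probe signals that the existing mapping
+    # already matches the requested state, so apply() should report
+    # changed=False rather than re-creating the mapping.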
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostmap_probe')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_host_mapping_create_get_existing_vdiskhostmap_called(
+ self, svc_authorize_mock, vdiskhostmap_probe_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'host': 'host4test',
+ })
+ host_mapping_created = IBMSVCvdiskhostmap()
+ vdiskhostmap_probe_mock.return_value = []
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_created.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostmap_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_host_mapping_but_host_mapping_existed(
+ self, svc_authorize_mock,
+ vdiskhostmap_create_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'host': 'host4test',
+ })
+ host_mapping = {u'message': u'Virtual Disk to Host map, id [0], '
+ u'successfully created', u'id': u'0'}
+ vdiskhostmap_create_mock.return_value = host_mapping
+ get_existing_vdiskhostmap_mock.return_value = []
+ host_mapping_created = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostmap_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_host_mapping_successfully(self, svc_authorize_mock,
+ vdiskhostmap_create_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'host': 'host4test',
+ })
+ host_mapping = {u'message': u'Virtual Disk to Host map, id [0], '
+ u'successfully created', u'id': u'0'}
+ vdiskhostmap_create_mock.return_value = host_mapping
+ get_existing_vdiskhostmap_mock.return_value = []
+ host_mapping_created = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_host_but_host_not_existed(self, svc_authorize_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'host': 'host4test',
+ })
+ get_existing_vdiskhostmap_mock.return_value = []
+ host_mapping_deleted = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_deleted.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostmap_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_host_successfully(self, svc_authorize_mock,
+ host_delete_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'host': 'host4test',
+ })
+ mapping_ret = [{"id": "0", "name": "volume_Ansible_collections",
+ "SCSI_id": "0", "host_id": "14",
+ "host_name": "host_ansible_collects",
+ "vdisk_UID": "6005076810CA0166C00000000000019F",
+ "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "mapping_type": "private", "host_cluster_id": "",
+ "host_cluster_name": "", "protocol": "scsi"}]
+ get_existing_vdiskhostmap_mock.return_value = mapping_ret
+ host_mapping_deleted = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_deleted.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostclustermap_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hostcluster_mapping_successfully(self, svc_authorize_mock,
+ vdiskhostclustermap_create_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'hostcluster': 'hostcluster4test',
+ })
+ host_mapping = {u'message': u'Volume to Host Cluster map, id [0], '
+ u'successfully created', u'id': u'0'}
+ vdiskhostclustermap_create_mock.return_value = host_mapping
+ get_existing_vdiskhostmap_mock.return_value = []
+ host_mapping_created = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostclustermap_create')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_create_hostcluster_mapping_but_mapping_exist(
+ self, svc_authorize_mock,
+ vdiskhostclustermap_create_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'present',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'hostcluster': 'hostcluster4test',
+ })
+ host_mapping = {u'message': u'Volume to Host Cluster map, id [0], '
+ u'successfully created', u'id': u'0'}
+ vdiskhostclustermap_create_mock.return_value = host_mapping
+ get_existing_vdiskhostmap_mock.return_value = []
+ host_mapping_created = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_created.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_hostcluster_mapping_not_exist(self, svc_authorize_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'hostcluster': 'hostcluster4test',
+ })
+ get_existing_vdiskhostmap_mock.return_value = []
+ host_mapping_deleted = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_deleted.apply()
+ self.assertFalse(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.get_existing_vdiskhostmap')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.modules.'
+ 'ibm_svc_vol_map.IBMSVCvdiskhostmap.vdiskhostclustermap_delete')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_utils.IBMSVCRestApi._svc_authorize')
+ def test_delete_hostcluster_mapping_successfully(self, svc_authorize_mock,
+ host_delete_mock,
+ get_existing_vdiskhostmap_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'domain': 'domain',
+ 'state': 'absent',
+ 'username': 'username',
+ 'password': 'password',
+ 'volname': 'volume0',
+ 'hostcluster': 'hostcluster4test',
+ })
+ mapping_ret = [{"id": "0", "name": "volume_Ansible_collections",
+ "SCSI_id": "0", "host_id": "",
+ "host_name": "",
+ "vdisk_UID": "6005076810CA0166C00000000000019F",
+ "IO_group_id": "0", "IO_group_name": "io_grp0",
+ "mapping_type": "private", "host_cluster_id": "1",
+ "host_cluster_name": "hostcluster4test", "protocol": "scsi"}]
+ get_existing_vdiskhostmap_mock.return_value = mapping_ret
+ host_mapping_deleted = IBMSVCvdiskhostmap()
+ with pytest.raises(AnsibleExitJson) as exc:
+ host_mapping_deleted.apply()
+ self.assertTrue(exc.value.args[0]['changed'])
+ get_existing_vdiskhostmap_mock.assert_called_with()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svcinfo_command.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svcinfo_command.py
new file mode 100644
index 000000000..4d7b0b945
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svcinfo_command.py
@@ -0,0 +1,190 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svcinfo_command """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils.compat.paramiko import paramiko
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svcinfo_command import IBMSVCsshClient
+
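+# paramiko comes in through Ansible's compat shim and is None when the library
+# is not installed, which is why the SSH tests below skip in that case.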
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCsshClient(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svcinfo lsuser',
+ })
+
+ def test_ssh_connect_with_missing_username(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'password': 'password',
+ 'command': 'svcinfo lsuser',
+ })
+ IBMSVCsshClient()
+ print('Info: %s' % exc.value.args[0]['msg'])
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ def test_ssh_connect_with_missing_password(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'command': 'svcinfo lsuser',
+ })
+ IBMSVCsshClient()
+ print('Info: %s' % exc.value.args[0]['msg'])
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_connect_with_password(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svcinfo cli_command',
+ })
+        if paramiko is None:
+            self.skipTest("paramiko is not installed")
+
+        # stub exec_command so no real SSH traffic is attempted
+        with patch.object(paramiko.SSHClient, 'exec_command'):
+            conn = IBMSVCsshClient()
+            with pytest.raises(Exception) as exc:
+                conn.send_svcinfo_command()
+            print('Info: %s' % exc.value.args[0])
+            self.assertTrue(conn.ssh_client.is_client_connected)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_connect_with_key(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': '',
+ 'usesshkey': 'yes',
+ 'command': 'svcinfo lsuser',
+ })
+        if paramiko is None:
+            self.skipTest("paramiko is not installed")
+
+        # stub exec_command so no real SSH traffic is attempted
+        with patch.object(paramiko.SSHClient, 'exec_command'):
+            conn = IBMSVCsshClient()
+            with pytest.raises(Exception) as exc:
+                conn.send_svcinfo_command()
+            print('Info: %s' % exc.value.args[0])
+            self.assertTrue(conn.ssh_client.is_client_connected)
+
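+    # mock.patch decorators apply bottom-up: the decorator closest to the
+    # function supplies the first mock argument after self.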
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_disconnect')
+    def test_ssh_disconnect(self, disconnect_mock, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svcinfo lsuser',
+ })
+ conn = IBMSVCsshClient()
+        conn.ssh_client.is_client_connected = True
+ conn.ssh_client._svc_disconnect()
+ self.assertTrue(conn.ssh_client.is_client_connected)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_disconnect_failed(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svcinfo lsuser',
+ })
+ conn = IBMSVCsshClient()
+ conn.ssh_client._svc_disconnect()
+ self.assertFalse(conn.ssh_client.is_client_connected)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_modify_command(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svcinfo lsuser',
+ })
+ conn = IBMSVCsshClient()
+ new_command = conn.modify_command("svcinfo lsuser user1")
+ self.assertEqual(new_command, "svcinfo lsuser -json user1")
+
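+    # modify_command is expected to splice -json in directly after the svcinfo
+    # subcommand, even when the trailing object name itself starts with "ls".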
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_modify_command_when_object_also_startswith_ls(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svcinfo lsuser lson',
+ })
+ conn = IBMSVCsshClient()
+ new_command = conn.modify_command("svcinfo lsuser lson")
+ self.assertEqual(new_command, "svcinfo lsuser -json lson")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svctask_command.py b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svctask_command.py
new file mode 100644
index 000000000..85629e572
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/plugins/modules/test_ibm_svctask_command.py
@@ -0,0 +1,164 @@
+# Copyright (C) 2020 IBM CORPORATION
+# Author(s): Shilpi Jain <shilpi.jain1@ibm.com>
+#
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+""" unit tests IBM Storage Virtualize Ansible module: ibm_svctask_command """
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import unittest
+import pytest
+import json
+from mock import patch
+from ansible.module_utils.compat.paramiko import paramiko
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+from ansible_collections.ibm.storage_virtualize.plugins.modules.ibm_svctask_command import IBMSVCsshClient
+
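+# ibm_svctask_command mirrors ibm_svcinfo_command but covers the
+# state-changing svctask CLI verbs, whereas svcinfo is read-only.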
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module
+ creation """
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the
+ test case """
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the
+ test case """
+ pass
+
+
+def exit_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over exit_json; package return data into an
+ exception """
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs): # pylint: disable=unused-argument
+ """function to patch over fail_json; package return data into an
+ exception """
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class TestIBMSVCsshClient_svctask(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def set_default_args(self):
+ return dict({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svctask cli_command',
+ })
+
+ def test_ssh_connect_with_missing_username(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'password': 'password',
+ 'command': 'svctask cli_command',
+ })
+ IBMSVCsshClient()
+ print('Info: %s' % exc.value.args[0]['msg'])
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ def test_ssh_connect_with_missing_password(self):
+ with pytest.raises(AnsibleFailJson) as exc:
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'command': 'svctask cli_command',
+ })
+ IBMSVCsshClient()
+ print('Info: %s' % exc.value.args[0]['msg'])
+ self.assertFalse(exc.value.args[0]['changed'])
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_connect_with_password(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svctask cli_command',
+ })
+        if paramiko is None:
+            self.skipTest("paramiko is not installed")
+
+        # stub exec_command so no real SSH traffic is attempted
+        with patch.object(paramiko.SSHClient, 'exec_command'):
+            conn = IBMSVCsshClient()
+            with pytest.raises(Exception) as exc:
+                conn.send_svctask_command()
+            print('Info: %s' % exc.value.args[0])
+            self.assertTrue(conn.ssh_client.is_client_connected)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_connect_with_key(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': '',
+ 'usesshkey': 'yes',
+ 'command': 'svctask cli_command',
+ })
+        if paramiko is None:
+            self.skipTest("paramiko is not installed")
+
+        # stub exec_command so no real SSH traffic is attempted
+        with patch.object(paramiko.SSHClient, 'exec_command'):
+            conn = IBMSVCsshClient()
+            with pytest.raises(Exception) as exc:
+                conn.send_svctask_command()
+            print('Info: %s' % exc.value.args[0])
+            self.assertTrue(conn.ssh_client.is_client_connected)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_disconnect')
+    def test_ssh_disconnect(self, disconnect_mock, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svctask cli_command',
+ })
+ conn = IBMSVCsshClient()
+        conn.ssh_client.is_client_connected = True
+ conn.ssh_client._svc_disconnect()
+ self.assertTrue(conn.ssh_client.is_client_connected)
+
+ @patch('ansible_collections.ibm.storage_virtualize.plugins.module_utils.'
+ 'ibm_svc_ssh.IBMSVCssh._svc_connect')
+ def test_ssh_disconnect_failed(self, connect_mock):
+ set_module_args({
+ 'clustername': 'clustername',
+ 'username': 'username',
+ 'password': 'password',
+ 'command': 'svctask cli_command',
+ })
+ conn = IBMSVCsshClient()
+ conn.ssh_client._svc_disconnect()
+ self.assertFalse(conn.ssh_client.is_client_connected)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/ibm/storage_virtualize/tests/unit/requirements.txt b/ansible_collections/ibm/storage_virtualize/tests/unit/requirements.txt
new file mode 100644
index 000000000..162872bed
--- /dev/null
+++ b/ansible_collections/ibm/storage_virtualize/tests/unit/requirements.txt
@@ -0,0 +1,2 @@
+paramiko >= 3.4.0
+cryptography >= 42.0.5